diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 389c57318409c..c1d11ed0a3d72 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -50,6 +50,7 @@ BWC_VERSION: - "1.3.11" - "1.3.12" - "1.3.13" + - "1.3.14" - "2.0.0" - "2.0.1" - "2.0.2" @@ -73,3 +74,8 @@ BWC_VERSION: - "2.8.1" - "2.9.0" - "2.9.1" + - "2.10.0" + - "2.10.1" + - "2.11.0" + - "2.11.1" + \ No newline at end of file diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 1a108c35429ae..4fa118e8486f1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1 @@ -* @reta @anasalkouz @andrross @reta @Bukhtawar @CEHENKLE @dblock @gbbafna @setiah @kartg @kotwanikunal @mch2 @nknize @owaiskazi19 @Rishikesh1159 @ryanbogan @saratvemulapalli @shwetathareja @dreamer-89 @tlfeng @VachaShah @dbwiddis @sachinpkale @sohami +* @reta @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @gbbafna @setiah @kartg @kotwanikunal @mch2 @nknize @owaiskazi19 @peternied @Rishikesh1159 @ryanbogan @saratvemulapalli @shwetathareja @dreamer-89 @tlfeng @VachaShah @dbwiddis @sachinpkale @sohami @msfroh diff --git a/.github/dependabot.yml b/.github/dependabot.yml index ca972d1b242e3..1f4d309e44a4c 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -599,6 +599,11 @@ updates: package-ecosystem: gradle schedule: interval: weekly + - directory: /plugins/crypto-kms/ + open-pull-requests-limit: 1 + package-ecosystem: gradle + schedule: + interval: weekly - directory: /qa/ open-pull-requests-limit: 1 package-ecosystem: gradle diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 4537cadf71074..7a4119a763b09 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -9,8 +9,10 @@ - [ ] All tests pass - [ ] New functionality has been documented. - [ ] New functionality has javadoc added +- [ ] Failing checks are inspected and point to the corresponding known issue(s) (See: [Troubleshooting Failing Builds](../blob/main/CONTRIBUTING.md#troubleshooting-failing-builds)) - [ ] Commits are signed per the DCO using --signoff - [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) +- [ ] Public documentation issue/PR [created](https://github.com/opensearch-project/documentation-website/issues/new/choose) By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. For more information on following Developer Certificate of Origin and signing off your commits, please check [here](https://github.com/opensearch-project/OpenSearch/blob/main/CONTRIBUTING.md#developer-certificate-of-origin). 
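As context for the two checklist items added above: the DCO sign-off is produced by git itself. A minimal sketch using standard git flags (nothing here is specific to this repository):

```sh
# Create a commit with the DCO sign-off the checklist asks for;
# --signoff (-s) appends a "Signed-off-by: Name <email>" trailer from your git config.
git commit --signoff -m "Describe the change"

# Add a missing sign-off to the latest commit without editing its message.
git commit --amend --signoff --no-edit
```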
diff --git a/.github/workflows/assemble.yml b/.github/workflows/assemble.yml new file mode 100644 index 0000000000000..6a66ac5fb5609 --- /dev/null +++ b/.github/workflows/assemble.yml @@ -0,0 +1,26 @@ +name: Gradle Assemble +on: [pull_request] + +jobs: + assemble: + if: github.repository == 'opensearch-project/OpenSearch' + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + steps: + - uses: actions/checkout@v4 + - name: Set up JDK 11 + uses: actions/setup-java@v3 + with: + java-version: 11 + distribution: temurin + - name: Setup docker (missing on MacOS) + if: runner.os == 'macos' + run: | + brew install docker + colima start + sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock + - name: Run Gradle (assemble) + run: | + ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml index b8d3912c5864a..252cbda1392f8 100644 --- a/.github/workflows/auto-release.yml +++ b/.github/workflows/auto-release.yml @@ -14,7 +14,7 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} @@ -22,7 +22,7 @@ jobs: - name: Get tag id: tag uses: dawidd6/action-get-tag@v1 - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - uses: ncipollo/release-action@v1 with: github_token: ${{ steps.github_app_token.outputs.token }} diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 2b95fb2510bdd..e190867fbaab5 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -26,7 +26,7 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index 8060ea93f477a..9456fbf8b4ca0 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -9,7 +9,7 @@ jobs: if: github.repository == 'opensearch-project/OpenSearch' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ github.event.pull_request.head.sha }} diff --git a/.github/workflows/check-compatibility.yml b/.github/workflows/check-compatibility.yml index b5f2ccbae6917..d93f7e73b91e7 100644 --- a/.github/workflows/check-compatibility.yml +++ b/.github/workflows/check-compatibility.yml @@ -11,10 +11,19 @@ jobs: contents: read runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} + - name: Increase swapfile + run: | + sudo swapoff -a + sudo fallocate -l 10G /swapfile + sudo chmod 600 /swapfile + sudo mkswap /swapfile + sudo swapon /swapfile + sudo swapon --show + - name: Run compatibility task run: ./gradlew checkCompatibility -i | tee $HOME/gradlew-check.out @@ -43,8 +52,18 @@ jobs: with: name: results.txt + - name: Find Comment + uses: peter-evans/find-comment@v2 + id: fc + with: + issue-number: ${{ github.event.number }} + comment-author: 'github-actions[bot]' + body-includes: 'Compatibility status:' + - name: Add comment on the PR uses: peter-evans/create-or-update-comment@v3 with: + comment-id: ${{ steps.fc.outputs.comment-id }} 
issue-number: ${{ github.event.number }} body-path: results.txt + edit-mode: replace diff --git a/.github/workflows/copy-linked-issue-labels.yml b/.github/workflows/copy-linked-issue-labels.yml new file mode 100644 index 0000000000000..33b5e92dc10da --- /dev/null +++ b/.github/workflows/copy-linked-issue-labels.yml @@ -0,0 +1,21 @@ +name: Copy labels from linked issues +on: + pull_request_target: + types: [opened, edited, review_requested, synchronize, reopened, ready_for_review] + +jobs: + copy-issue-labels: + if: github.repository == 'opensearch-project/OpenSearch' + runs-on: ubuntu-latest + permissions: + issues: read + contents: read + pull-requests: write + steps: + - name: copy-issue-labels + uses: michalvankodev/copy-issue-labels@v1.3.0 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + labels-to-exclude: | + untriaged + triaged diff --git a/.github/workflows/create-documentation-issue.yml b/.github/workflows/create-documentation-issue.yml index c81f7355a0d22..df63847f8afca 100644 --- a/.github/workflows/create-documentation-issue.yml +++ b/.github/workflows/create-documentation-issue.yml @@ -14,14 +14,14 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} installation_id: 22958780 - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Edit the issue template run: | diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml index ed98bae8978ed..add38e306e60b 100644 --- a/.github/workflows/dependabot_pr.yml +++ b/.github/workflows/dependabot_pr.yml @@ -11,14 +11,14 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} installation_id: 22958780 - name: Check out code - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: token: ${{ steps.github_app_token.outputs.token }} diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 5858a194b8bc3..2d4e86a7a3522 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -23,7 +23,7 @@ jobs: timeout-minutes: 130 steps: - name: Checkout OpenSearch repo - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} @@ -50,7 +50,7 @@ jobs: echo "pr_number=Null" >> $GITHUB_ENV - name: Checkout opensearch-build repo - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: repository: opensearch-project/opensearch-build ref: main @@ -72,13 +72,13 @@ jobs: - name: Upload Coverage Report if: success() - uses: codecov/codecov-action@v2 + uses: codecov/codecov-action@v3 with: files: ./codeCoverage.xml - name: Create Comment Success if: ${{ github.event_name == 'pull_request_target' && success() && env.result == 'SUCCESS' }} - uses: peter-evans/create-or-update-comment@v2 + uses: peter-evans/create-or-update-comment@v3 with: issue-number: ${{ env.pr_number }} body: | @@ -104,7 +104,7 @@ jobs: - name: Create Comment Flaky if: ${{ github.event_name == 'pull_request_target' && success() && env.result != 'SUCCESS' }} - uses: peter-evans/create-or-update-comment@v2 + uses: peter-evans/create-or-update-comment@v3 with: issue-number: ${{ env.pr_number }} body: | @@ -116,7 +116,7 @@ jobs: - name: Create Comment Failure if: ${{ github.event_name == 
'pull_request_target' && failure() }} - uses: peter-evans/create-or-update-comment@v2 + uses: peter-evans/create-or-update-comment@v3 with: issue-number: ${{ env.pr_number }} body: | diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 4322a740145e6..ca026f530b4af 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: lychee Link Checker id: lychee uses: lycheeverse/lychee-action@v1.8.0 diff --git a/.github/workflows/lucene-snapshots.yml b/.github/workflows/lucene-snapshots.yml index 994b420cb5847..76981276fe085 100644 --- a/.github/workflows/lucene-snapshots.yml +++ b/.github/workflows/lucene-snapshots.yml @@ -21,7 +21,7 @@ jobs: contents: read steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up JDK 17 uses: actions/setup-java@v3 with: @@ -29,7 +29,7 @@ jobs: distribution: 'adopt' - name: Checkout Lucene - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: repository: 'apache/lucene' path: lucene @@ -38,7 +38,7 @@ jobs: - name: Set hash working-directory: ./lucene run: | - echo "::set-output name=REVISION::$(git rev-parse --short HEAD)" + echo "REVISION=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT id: version - name: Initialize gradle settings diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index 0372d57dda91f..b04f404b11c55 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -1,4 +1,4 @@ -name: Gradle Precommit and Asssemble +name: Gradle Precommit on: [pull_request] jobs: @@ -9,7 +9,7 @@ jobs: matrix: os: [ubuntu-latest, windows-latest, macos-latest] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up JDK 11 uses: actions/setup-java@v3 with: @@ -19,16 +19,3 @@ jobs: - name: Run Gradle (precommit) run: | ./gradlew javadoc precommit --parallel - - name: Setup docker (missing on MacOS) - if: runner.os == 'macos' - run: | - # Workaround for https://github.com/actions/runner-images/issues/8104 - brew remove --ignore-dependencies qemu - curl -o ./qemu.rb https://raw.githubusercontent.com/Homebrew/homebrew-core/f88e30b3a23ef3735580f9b05535ce5a0a03c9e3/Formula/qemu.rb - brew install ./qemu.rb - brew install docker - colima start - sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock - - name: Run Gradle (assemble) - run: | - ./gradlew assemble --parallel diff --git a/.github/workflows/publish-maven-snapshots.yml b/.github/workflows/publish-maven-snapshots.yml index 43c18af78ae4c..8c08df269a999 100644 --- a/.github/workflows/publish-maven-snapshots.yml +++ b/.github/workflows/publish-maven-snapshots.yml @@ -18,7 +18,7 @@ jobs: contents: write steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up JDK 17 uses: actions/setup-java@v3 with: diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index ad79a425557bb..a20c671c137b2 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -13,13 +13,13 @@ jobs: steps: - name: GitHub App token id: github_app_token - uses: tibdex/github-app-token@v1.5.0 + uses: tibdex/github-app-token@v2.1.0 with: app_id: ${{ secrets.APP_ID }} private_key: ${{ secrets.APP_PRIVATE_KEY }} installation_id: 22958780 - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Fetch Tag and Version Information run: | TAG=$(echo "${GITHUB_REF#refs/*/}") @@ -44,7 +44,7 @@ jobs: echo 
"NEXT_VERSION=$NEXT_VERSION" >> $GITHUB_ENV echo "NEXT_VERSION_UNDERSCORE=$NEXT_VERSION_UNDERSCORE" >> $GITHUB_ENV echo "NEXT_VERSION_ID=$NEXT_VERSION_ID" >> $GITHUB_ENV - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: ref: ${{ env.BASE }} token: ${{ steps.github_app_token.outputs.token }} @@ -59,7 +59,7 @@ jobs: sed -i "s/CURRENT = $CURRENT_VERSION_UNDERSCORE;/CURRENT = $NEXT_VERSION_UNDERSCORE;/g" libs/core/src/main/java/org/opensearch/Version.java - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + uses: peter-evans/create-pull-request@v5 with: token: ${{ steps.github_app_token.outputs.token }} base: ${{ env.BASE }} @@ -73,7 +73,7 @@ jobs: body: | I've noticed that a new tag ${{ env.TAG }} was pushed, and incremented the version from ${{ env.CURRENT_VERSION }} to ${{ env.NEXT_VERSION }}. - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: ref: ${{ env.BASE_X }} token: ${{ steps.github_app_token.outputs.token }} @@ -86,7 +86,7 @@ jobs: sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + uses: peter-evans/create-pull-request@v5 with: token: ${{ steps.github_app_token.outputs.token }} base: ${{ env.BASE_X }} @@ -100,7 +100,7 @@ jobs: body: | I've noticed that a new tag ${{ env.TAG }} was pushed, and added a bwc version ${{ env.NEXT_VERSION }}. - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: ref: main token: ${{ steps.github_app_token.outputs.token }} @@ -113,7 +113,7 @@ jobs: sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 + uses: peter-evans/create-pull-request@v5 with: token: ${{ steps.github_app_token.outputs.token }} base: main diff --git a/.github/workflows/wrapper.yml b/.github/workflows/wrapper.yml index 80acaa906711b..6dd48ca15eaa9 100644 --- a/.github/workflows/wrapper.yml +++ b/.github/workflows/wrapper.yml @@ -7,5 +7,5 @@ jobs: if: github.repository == 'opensearch-project/OpenSearch' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - uses: gradle/wrapper-validation-action@v1 diff --git a/CHANGELOG.md b/CHANGELOG.md index eda8fab74e87e..8b4e8d2a67c6b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,98 +5,56 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added -- Add server version as REST response header [#6583](https://github.com/opensearch-project/OpenSearch/issues/6583) -- Start replication checkpointTimers on primary before segments upload to remote store. ([#8221]()https://github.com/opensearch-project/OpenSearch/pull/8221) -- Introduce new static cluster setting to control slice computation for concurrent segment search. 
([#8847](https://github.com/opensearch-project/OpenSearch/pull/8884)) -- Add configuration for file cache size to max remote data ratio to prevent oversubscription of file cache ([#8606](https://github.com/opensearch-project/OpenSearch/pull/8606)) -- Disallow compression level to be set for default and best_compression index codecs ([#8737]()https://github.com/opensearch-project/OpenSearch/pull/8737) -- [distribution/archives] [Linux] [x64] Provide the variant of the distributions bundled with JRE ([#8195]()https://github.com/opensearch-project/OpenSearch/pull/8195) -- Prioritize replica shard movement during shard relocation ([#8875](https://github.com/opensearch-project/OpenSearch/pull/8875)) -- Introducing Default and Best Compression codecs as their algorithm name ([#9123]()https://github.com/opensearch-project/OpenSearch/pull/9123) -- Make SearchTemplateRequest implement IndicesRequest.Replaceable ([#9122]()https://github.com/opensearch-project/OpenSearch/pull/9122) -- [BWC and API enforcement] Define the initial set of annotations, their meaning and relations between them ([#9223](https://github.com/opensearch-project/OpenSearch/pull/9223)) -- [Remote Store] Add Segment download stats to remotestore stats API ([#8718](https://github.com/opensearch-project/OpenSearch/pull/8718)) -- [Remote Store] Add remote segment transfer stats on NodesStats API ([#9168](https://github.com/opensearch-project/OpenSearch/pull/9168)) -- [Segment Replication] Support realtime reads for GET requests ([#9212](https://github.com/opensearch-project/OpenSearch/pull/9212)) -- Allow test clusters to run with TLS ([#8900](https://github.com/opensearch-project/OpenSearch/pull/8900)) -- Add jdk.incubator.vector module support for JDK 20+ ([#8601](https://github.com/opensearch-project/OpenSearch/pull/8601)) -- [Feature] Expose term frequency in Painless script score context ([#9081](https://github.com/opensearch-project/OpenSearch/pull/9081)) -- Add support for reading partial files to HDFS repository ([#9513](https://github.com/opensearch-project/OpenSearch/issues/9513)) +- [Admission control] Add Resource usage collector service and resource usage tracker ([#10695](https://github.com/opensearch-project/OpenSearch/pull/10695)) +- [Admission control] Add enhancements to FS stats to include read/write time, queue size and IO time ([#10696](https://github.com/opensearch-project/OpenSearch/pull/10696)) +- [Remote cluster state] Change file names for remote cluster state ([#10557](https://github.com/opensearch-project/OpenSearch/pull/10557)) +- [Remote Store] Add repository stats for remote store ([#10567](https://github.com/opensearch-project/OpenSearch/pull/10567)) +- [Remote cluster state] Upload global metadata in cluster state to remote store ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +- [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535)) +- [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404)) +- [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814)) +- Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) +- Per request phase latency 
([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) +- Added cluster setting cluster.restrict.index.replication_type to restrict setting of index setting replication type ([#10866](https://github.com/opensearch-project/OpenSearch/pull/10866)) +- Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) +- [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853)) +- Update the indexRandom function to create more segments for concurrent search tests ([#10247](https://github.com/opensearch-project/OpenSearch/pull/10247)) +- Add support for query profiler with concurrent aggregation ([#9248](https://github.com/opensearch-project/OpenSearch/pull/9248)) +- Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([#10352](https://github.com/opensearch-project/OpenSearch/pull/10352)) ### Dependencies -- Bump `org.apache.logging.log4j:log4j-core` from 2.17.1 to 2.20.0 ([#8307](https://github.com/opensearch-project/OpenSearch/pull/8307)) -- Bump `io.grpc:grpc-context` from 1.46.0 to 1.57.1 ([#8726](https://github.com/opensearch-project/OpenSearch/pull/8726), [#9145](https://github.com/opensearch-project/OpenSearch/pull/9145)) -- Bump `com.netflix.nebula:gradle-info-plugin` from 12.1.5 to 12.1.6 ([#8724](https://github.com/opensearch-project/OpenSearch/pull/8724)) -- Bump `commons-codec:commons-codec` from 1.15 to 1.16.0 ([#8725](https://github.com/opensearch-project/OpenSearch/pull/8725)) -- Bump `org.apache.zookeeper:zookeeper` from 3.8.1 to 3.9.0 ([#8844](https://github.com/opensearch-project/OpenSearch/pull/8844), [#9146](https://github.com/opensearch-project/OpenSearch/pull/9146)) -- Bump `org.gradle.test-retry` from 1.5.3 to 1.5.4 ([#8842](https://github.com/opensearch-project/OpenSearch/pull/8842)) -- Bump `com.netflix.nebula.ospackage-base` from 11.3.0 to 11.4.0 ([#8838](https://github.com/opensearch-project/OpenSearch/pull/8838)) -- Bump `com.google.http-client:google-http-client-gson` from 1.43.2 to 1.43.3 ([#8840](https://github.com/opensearch-project/OpenSearch/pull/8840)) -- OpenJDK Update (July 2023 Patch releases) ([#8869](https://github.com/opensearch-project/OpenSearch/pull/8869)) -- Bump `hadoop` libraries from 3.3.4 to 3.3.6 ([#6995](https://github.com/opensearch-project/OpenSearch/pull/6995)) -- Bump `com.gradle.enterprise` from 3.13.3 to 3.14.1 ([#8996](https://github.com/opensearch-project/OpenSearch/pull/8996)) -- Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 ([#8995](https://github.com/opensearch-project/OpenSearch/pull/8995)) -- Bump `com.google.cloud:google-cloud-core-http` from 2.21.0 to 2.21.1 ([#8999](https://github.com/opensearch-project/OpenSearch/pull/8999)) -- Bump `com.maxmind.geoip2:geoip2` from 4.0.1 to 4.1.0 ([#8998](https://github.com/opensearch-project/OpenSearch/pull/8998)) -- Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 in /plugins/repository-hdfs ([#8997](https://github.com/opensearch-project/OpenSearch/pull/8997)) -- Bump `netty` from 4.1.94.Final to 4.1.96.Final ([#9030](https://github.com/opensearch-project/OpenSearch/pull/9030)) -- Bump `com.google.jimfs:jimfs` from 1.2 to 1.3.0 ([#9080](https://github.com/opensearch-project/OpenSearch/pull/9080)) -- Bump `io.projectreactor.netty:reactor-netty-http` from 1.1.8 to 1.1.9 
([#9147](https://github.com/opensearch-project/OpenSearch/pull/9147)) -- Bump `org.apache.maven:maven-model` from 3.9.3 to 3.9.4 ([#9148](https://github.com/opensearch-project/OpenSearch/pull/9148)) -- Bump `com.azure:azure-storage-blob` from 12.22.3 to 12.23.0 ([#9231](https://github.com/opensearch-project/OpenSearch/pull/9231)) -- Bump `com.diffplug.spotless` from 6.19.0 to 6.20.0 ([#9227](https://github.com/opensearch-project/OpenSearch/pull/9227)) -- Bump `org.xerial.snappy:snappy-java` from 1.1.8.2 to 1.1.10.3 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) -- Bump `com.squareup.okhttp3:okhttp` from 4.9.3 to 4.11.0 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) -- Bump `com.squareup.okio:okio` from 2.8.0 to 3.5.0 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) -- Bump `com.google.code.gson:gson` from 2.9.0 to 2.10.1 ([#9230](https://github.com/opensearch-project/OpenSearch/pull/9230)) -- Bump `lycheeverse/lychee-action` from 1.2.0 to 1.8.0 ([#9228](https://github.com/opensearch-project/OpenSearch/pull/9228)) -- Bump `snakeyaml` from 2.0 to 2.1 ([#9269](https://github.com/opensearch-project/OpenSearch/pull/9269)) -- Bump `aws-actions/configure-aws-credentials` from 1 to 2 ([#9302](https://github.com/opensearch-project/OpenSearch/pull/9302)) -- Bump `com.github.luben:zstd-jni` from 1.5.5-3 to 1.5.5-5 ([#9431](https://github.com/opensearch-project/OpenSearch/pull/9431) +- Bump jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) +- Bump `netty` from 4.1.99.Final to 4.1.100.Final ([#10564](https://github.com/opensearch-project/OpenSearch/pull/10564)) +- Bump Lucene from 9.7.0 to 9.8.0 ([#10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) +- Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294)) +- Bump `com.google.api.grpc:proto-google-common-protos` from 2.25.0 to 2.25.1 ([#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) +- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) +- Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617)) +- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639)) +- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.21.1 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), [#11000](https://github.com/opensearch-project/OpenSearch/pull/11000)) ### Changed -- Default to mmapfs within hybridfs ([#8508](https://github.com/opensearch-project/OpenSearch/pull/8508)) -- Perform aggregation postCollection in ContextIndexSearcher after searching leaves ([#8303](https://github.com/opensearch-project/OpenSearch/pull/8303)) -- Make Span exporter configurable ([#8620](https://github.com/opensearch-project/OpenSearch/issues/8620)) -- Perform aggregation postCollection in ContextIndexSearcher after searching leaves ([#8303](https://github.com/opensearch-project/OpenSearch/pull/8303)) -- [Refactor] StreamIO from common to core.common namespace in core lib ([#8157](https://github.com/opensearch-project/OpenSearch/pull/8157)) -- [Refactor] Remaining HPPC to java.util collections 
([#8730](https://github.com/opensearch-project/OpenSearch/pull/8730)) -- Remote Segment Store Repository setting moved from `index.remote_store.repository` to `index.remote_store.segment.repository` and `cluster.remote_store.repository` to `cluster.remote_store.segment.repository` respectively for Index and Cluster level settings ([#8719](https://github.com/opensearch-project/OpenSearch/pull/8719)) -- Change InternalSignificantTerms to sum shard-level superset counts only in final reduce ([#8735](https://github.com/opensearch-project/OpenSearch/pull/8735)) -- Exclude 'benchmarks' from codecov report ([#8805](https://github.com/opensearch-project/OpenSearch/pull/8805)) -- Create separate SourceLookup instance per segment slice in SignificantTextAggregatorFactory ([#8807](https://github.com/opensearch-project/OpenSearch/pull/8807)) -- Replace the deprecated IndexReader APIs with new storedFields() & termVectors() ([#7792](https://github.com/opensearch-project/OpenSearch/pull/7792)) -- [Remote Store] Add support to restore only unassigned shards of an index ([#8792](https://github.com/opensearch-project/OpenSearch/pull/8792)) -- Add safeguard limits for file cache during node level allocation ([#8208](https://github.com/opensearch-project/OpenSearch/pull/8208)) -- Add support for aggregation profiler with concurrent aggregation ([#8801](https://github.com/opensearch-project/OpenSearch/pull/8801)) -- [Remove] Deprecated Fractional ByteSizeValue support #9005 ([#9005](https://github.com/opensearch-project/OpenSearch/pull/9005)) -- Add support for aggregation profiler with concurrent aggregation ([#8801](https://github.com/opensearch-project/OpenSearch/pull/8801)) -- [Remote Store] Restrict user override for remote store index level settings ([#8812](https://github.com/opensearch-project/OpenSearch/pull/8812)) -- [Refactor] MediaTypeParser to MediaTypeParserRegistry ([#8636](https://github.com/opensearch-project/OpenSearch/pull/8636)) -- Make MultiBucketConsumerService thread safe to use across slices during search ([#9047](https://github.com/opensearch-project/OpenSearch/pull/9047)) -- Removed blocking wait in TransportGetSnapshotsAction which was exhausting generic threadpool ([#8377](https://github.com/opensearch-project/OpenSearch/pull/8377)) -- Adds support for tracing runnable scenarios ([#8831](https://github.com/opensearch-project/OpenSearch/pull/8831)) -- Change shard_size and shard_min_doc_count evaluation to happen in shard level reduce phase ([#9085](https://github.com/opensearch-project/OpenSearch/pull/9085)) -- Add attributes to startSpan methods ([#9199](https://github.com/opensearch-project/OpenSearch/pull/9199)) -- [Refactor] Task foundation classes to core library - pt 1 ([#9082](https://github.com/opensearch-project/OpenSearch/pull/9082)) -- Add support for wrapping CollectorManager with profiling during concurrent execution ([#9129](https://github.com/opensearch-project/OpenSearch/pull/9129)) -- Add base class for parameterizing the search based tests #9083 ([#9083](https://github.com/opensearch-project/OpenSearch/pull/9083)) -- Add support for wrapping CollectorManager with profiling during concurrent execution ([#9129](https://github.com/opensearch-project/OpenSearch/pull/9129)) -- Rethrow OpenSearch exception for non-concurrent path while using concurrent search ([#9177](https://github.com/opensearch-project/OpenSearch/pull/9177)) -- Improve performance of encoding composite keys in multi-term aggregations 
([#9412](https://github.com/opensearch-project/OpenSearch/pull/9412)) -- Refactor Compressors from CompressorFactory to CompressorRegistry for extensibility ([#9262](https://github.com/opensearch-project/OpenSearch/pull/9262)) -- Fix sort related ITs for concurrent search ([#9177](https://github.com/opensearch-project/OpenSearch/pull/9466) +- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) +- Add the means to extract the contextual properties from HttpChannel, TcpChannel and TransportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)) +- Backport the PR #9107 for updating CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY setting to a dynamic setting ([#10606](https://github.com/opensearch-project/OpenSearch/pull/10606)) +- Search pipelines now support asynchronous request and response processors to avoid blocking on a transport thread ([#10598](https://github.com/opensearch-project/OpenSearch/pull/10598)) +- [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) +- [BUG] Fix java.lang.SecurityException in repository-gcs plugin ([#10642](https://github.com/opensearch-project/OpenSearch/pull/10642)) +- Add telemetry tracer/metric enable flag and integ test. ([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395)) +- Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558)) +- Add instrumentation for indexing in transport bulk action and transport shard bulk action. ([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273)) +- Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) +- [BUG] Disable sort optimization for HALF_FLOAT ([#10999](https://github.com/opensearch-project/OpenSearch/pull/10999)) ### Deprecated ### Removed -- Remove provision to create Remote Indices without Remote Translog Store ([#8719](https://github.com/opensearch-project/OpenSearch/pull/8719)) +- Remove deprecated classes for Rounding ([#10956](https://github.com/opensearch-project/OpenSearch/issues/10956)) ### Fixed -- Fix flaky ResourceAwareTasksTests.testBasicTaskResourceTracking test ([#8993](https://github.com/opensearch-project/OpenSearch/pull/8993)) -- Fix memory leak when using Zstd Dictionary ([#9403](https://github.com/opensearch-project/OpenSearch/pull/9403)) -- Fix condition to remove index create block ([#9437](https://github.com/opensearch-project/OpenSearch/pull/9437)) +- Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) ### Security -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.10...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d379d78829318..4a1162cf2558b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,6 +8,7 @@ - [Developer Certificate of Origin](#developer-certificate-of-origin) - [Changelog](#changelog) - [Review Process](#review-process) + - [Troubleshooting Failing Builds](#troubleshooting-failing-builds) # Contributing to OpenSearch @@ -162,3 +163,14 @@ During the PR process, expect that there will be some back-and-forth. 
Please try If we accept the PR, a [maintainer](MAINTAINERS.md) will merge your change and usually take care of backporting it to appropriate branches ourselves. If we reject the PR, we will close the pull request with a comment explaining why. This decision isn't always final: if you feel we have misunderstood your intended change or otherwise think that we should reconsider then please continue the conversation with a comment on the PR and we'll do our best to address any further points you raise. + +## Troubleshooting Failing Builds + +The OpenSearch testing framework offers many capabilities but exhibits significant complexity (it does a lot of randomization internally to cover as many edge cases and variations as possible). Unfortunately, this poses a challenge by making it harder to discover important issues/bugs in a straightforward way and may lead to so-called flaky tests: tests that flip randomly from success to failure without any code changes. + +If your pull request reports failing test(s) on one of the checks, please: + - check whether there is an existing [issue](https://github.com/opensearch-project/OpenSearch/issues) reported for the test in question + - if not, please make sure the failure is not caused by your changes: run the failing test(s) locally for some time + - if you are sure the failure is not related, please open a new [bug](https://github.com/opensearch-project/OpenSearch/issues/new?assignees=&labels=bug%2C+untriaged&projects=&template=bug_template.md&title=%5BBUG%5D) with the `flaky-test` label + - add a comment referencing the issue(s) or bug report(s) to your pull request explaining the failing build(s) + - as a bonus, try to contribute by fixing the flaky test(s) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 95e87f4be43bf..42a8a439445ca 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -6,6 +6,7 @@ This document contains a list of maintainers in this repo. 
See [opensearch-proje ## Emeritus | Maintainer | GitHub ID | Affiliation | -|-------------------------|---------------------------------------------| ----------- | -| Abbas Hussain | [abbashus](https://github.com/abbashus) | Amazon | +|-------------------------|---------------------------------------------|-------------| | Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon | -| Rabi Panda | [adnapibar](https://github.com/adnapibar) | Amazon | | Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon | diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/index/mapper/CustomBinaryDocValuesFieldBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/index/mapper/CustomBinaryDocValuesFieldBenchmark.java new file mode 100644 index 0000000000000..7307bec088d02 --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/benchmark/index/mapper/CustomBinaryDocValuesFieldBenchmark.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.benchmark.index.mapper; + +import org.apache.lucene.util.BytesRef; +import org.opensearch.index.mapper.BinaryFieldMapper; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; + +@Warmup(iterations = 1) +@Measurement(iterations = 1) +@Fork(1) +@BenchmarkMode(Mode.Throughput) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@State(Scope.Thread) +@SuppressWarnings("unused") // invoked by benchmarking framework +public class CustomBinaryDocValuesFieldBenchmark { + + static final String FIELD_NAME = "dummy"; + static final String SEED_VALUE = "seed"; + + @Benchmark + public void add(CustomBinaryDocValuesFieldBenchmark.BenchmarkParameters parameters, Blackhole blackhole) { + // Don't use the parameter binary doc values object. 
+ // Start with a fresh object every call and add maximum number of entries + BinaryFieldMapper.CustomBinaryDocValuesField customBinaryDocValuesField = new BinaryFieldMapper.CustomBinaryDocValuesField( + FIELD_NAME, + new BytesRef(SEED_VALUE).bytes + ); + for (int i = 0; i < parameters.maximumNumberOfEntries; ++i) { + ThreadLocalRandom.current().nextBytes(parameters.bytes); + customBinaryDocValuesField.add(parameters.bytes); + } + } + + @Benchmark + public void binaryValue(CustomBinaryDocValuesFieldBenchmark.BenchmarkParameters parameters, Blackhole blackhole) { + blackhole.consume(parameters.customBinaryDocValuesField.binaryValue()); + } + + @State(Scope.Benchmark) + public static class BenchmarkParameters { + @Param({ "8", "32", "128", "512" }) + int maximumNumberOfEntries; + + @Param({ "8", "32", "128", "512" }) + int entrySize; + + BinaryFieldMapper.CustomBinaryDocValuesField customBinaryDocValuesField; + byte[] bytes; + + @Setup + public void setup() { + customBinaryDocValuesField = new BinaryFieldMapper.CustomBinaryDocValuesField(FIELD_NAME, new BytesRef(SEED_VALUE).bytes); + bytes = new byte[entrySize]; + for (int i = 0; i < maximumNumberOfEntries; ++i) { + ThreadLocalRandom.current().nextBytes(bytes); + customBinaryDocValuesField.add(bytes); + } + } + } +} diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/time/RoundingBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/time/RoundingBenchmark.java deleted file mode 100644 index cdbcbfc163191..0000000000000 --- a/benchmarks/src/main/java/org/opensearch/benchmark/time/RoundingBenchmark.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.benchmark.time; - -import org.opensearch.common.Rounding; -import org.opensearch.common.rounding.DateTimeUnit; -import org.opensearch.common.time.DateUtils; -import org.opensearch.common.unit.TimeValue; -import org.joda.time.DateTimeZone; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.OutputTimeUnit; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.Warmup; - -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.util.concurrent.TimeUnit; - -import static org.opensearch.common.Rounding.DateTimeUnit.DAY_OF_MONTH; -import static org.opensearch.common.Rounding.DateTimeUnit.MONTH_OF_YEAR; -import static org.opensearch.common.Rounding.DateTimeUnit.QUARTER_OF_YEAR; -import static org.opensearch.common.Rounding.DateTimeUnit.YEAR_OF_CENTURY; - -@Fork(3) -@Warmup(iterations = 10) -@Measurement(iterations = 10) -@BenchmarkMode(Mode.AverageTime) -@OutputTimeUnit(TimeUnit.NANOSECONDS) -@State(Scope.Benchmark) -@SuppressWarnings("unused") // invoked by benchmarking framework -public class RoundingBenchmark { - - private final ZoneId zoneId = ZoneId.of("Europe/Amsterdam"); - private final DateTimeZone timeZone = DateUtils.zoneIdToDateTimeZone(zoneId); - - private long timestamp = 1548879021354L; - - private final org.opensearch.common.rounding.Rounding jodaRounding = org.opensearch.common.rounding.Rounding.builder( - DateTimeUnit.HOUR_OF_DAY - ).timeZone(timeZone).build(); - private final Rounding javaRounding = Rounding.builder(Rounding.DateTimeUnit.HOUR_OF_DAY).timeZone(zoneId).build(); - - @Benchmark - public long timeRoundingDateTimeUnitJoda() { - return jodaRounding.round(timestamp); - } - - @Benchmark - public long timeRoundingDateTimeUnitJava() { - return javaRounding.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding jodaDayOfMonthRounding = org.opensearch.common.rounding.Rounding.builder( - DateTimeUnit.DAY_OF_MONTH - ).timeZone(timeZone).build(); - private final Rounding javaDayOfMonthRounding = Rounding.builder(DAY_OF_MONTH).timeZone(zoneId).build(); - - @Benchmark - public long timeRoundingDateTimeUnitDayOfMonthJoda() { - return jodaDayOfMonthRounding.round(timestamp); - } - - @Benchmark - public long timeRoundingDateTimeUnitDayOfMonthJava() { - return javaDayOfMonthRounding.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeIntervalRoundingJoda = org.opensearch.common.rounding.Rounding.builder( - TimeValue.timeValueMinutes(60) - ).timeZone(timeZone).build(); - private final Rounding timeIntervalRoundingJava = Rounding.builder(TimeValue.timeValueMinutes(60)).timeZone(zoneId).build(); - - @Benchmark - public long timeIntervalRoundingJava() { - return timeIntervalRoundingJava.round(timestamp); - } - - @Benchmark - public long timeIntervalRoundingJoda() { - return timeIntervalRoundingJoda.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeUnitRoundingUtcDayOfMonthJoda = org.opensearch.common.rounding.Rounding - .builder(DateTimeUnit.DAY_OF_MONTH) - .timeZone(DateTimeZone.UTC) - .build(); - private final Rounding timeUnitRoundingUtcDayOfMonthJava = Rounding.builder(DAY_OF_MONTH).timeZone(ZoneOffset.UTC).build(); - - @Benchmark - public long 
timeUnitRoundingUtcDayOfMonthJava() { - return timeUnitRoundingUtcDayOfMonthJava.round(timestamp); - } - - @Benchmark - public long timeUnitRoundingUtcDayOfMonthJoda() { - return timeUnitRoundingUtcDayOfMonthJoda.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeUnitRoundingUtcQuarterOfYearJoda = org.opensearch.common.rounding.Rounding - .builder(DateTimeUnit.QUARTER) - .timeZone(DateTimeZone.UTC) - .build(); - private final Rounding timeUnitRoundingUtcQuarterOfYearJava = Rounding.builder(QUARTER_OF_YEAR).timeZone(ZoneOffset.UTC).build(); - - @Benchmark - public long timeUnitRoundingUtcQuarterOfYearJava() { - return timeUnitRoundingUtcQuarterOfYearJava.round(timestamp); - } - - @Benchmark - public long timeUnitRoundingUtcQuarterOfYearJoda() { - return timeUnitRoundingUtcQuarterOfYearJoda.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeUnitRoundingUtcMonthOfYearJoda = org.opensearch.common.rounding.Rounding - .builder(DateTimeUnit.MONTH_OF_YEAR) - .timeZone(DateTimeZone.UTC) - .build(); - private final Rounding timeUnitRoundingUtcMonthOfYearJava = Rounding.builder(MONTH_OF_YEAR).timeZone(ZoneOffset.UTC).build(); - - @Benchmark - public long timeUnitRoundingUtcMonthOfYearJava() { - return timeUnitRoundingUtcMonthOfYearJava.round(timestamp); - } - - @Benchmark - public long timeUnitRoundingUtcMonthOfYearJoda() { - return timeUnitRoundingUtcMonthOfYearJoda.round(timestamp); - } - - private final org.opensearch.common.rounding.Rounding timeUnitRoundingUtcYearOfCenturyJoda = org.opensearch.common.rounding.Rounding - .builder(DateTimeUnit.YEAR_OF_CENTURY) - .timeZone(DateTimeZone.UTC) - .build(); - private final Rounding timeUnitRoundingUtcYearOfCenturyJava = Rounding.builder(YEAR_OF_CENTURY).timeZone(ZoneOffset.UTC).build(); - - @Benchmark - public long timeUnitRoundingUtcYearOfCenturyJava() { - return timeUnitRoundingUtcYearOfCenturyJava.round(timestamp); - } - - @Benchmark - public long timeUnitRoundingUtcYearOfCenturyJoda() { - return timeUnitRoundingUtcYearOfCenturyJoda.round(timestamp); - } -} diff --git a/benchmarks/src/main/java/org/opensearch/common/ArrayRoundingBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/ArrayRoundingBenchmark.java new file mode 100644 index 0000000000000..64c0a9e1d7aa6 --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/common/ArrayRoundingBenchmark.java @@ -0,0 +1,147 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common; + +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.Random; +import java.util.function.Supplier; + +@Fork(value = 3) +@Warmup(iterations = 3, time = 1) +@Measurement(iterations = 1, time = 1) +@BenchmarkMode(Mode.Throughput) +public class ArrayRoundingBenchmark { + + @Benchmark + public void round(Blackhole bh, Options opts) { + Rounding.Prepared rounding = opts.supplier.get(); + for (long key : opts.queries) { + bh.consume(rounding.round(key)); + } + } + + @State(Scope.Benchmark) + public static class Options { + @Param({ + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "12", + "14", + "16", + "18", + "20", + "22", + "24", + "26", + "29", + "32", + "37", + "41", + "45", + "49", + "54", + "60", + "64", + "74", + "83", + "90", + "98", + "108", + "118", + "128", + "144", + "159", + "171", + "187", + "204", + "229", + "256" }) + public Integer size; + + @Param({ "binary", "linear" }) + public String type; + + @Param({ "uniform", "skewed_edge", "skewed_center" }) + public String distribution; + + public long[] queries; + public Supplier<Rounding.Prepared> supplier; + + @Setup + public void setup() { + Random random = new Random(size); + long[] values = new long[size]; + for (int i = 1; i < values.length; i++) { + values[i] = values[i - 1] + 100; + } + + long range = values[values.length - 1] - values[0] + 100; + long mean, stddev; + queries = new long[1000000]; + + switch (distribution) { + case "uniform": // all values equally likely. + for (int i = 0; i < queries.length; i++) { + queries[i] = values[0] + (nextPositiveLong(random) % range); + } + break; + case "skewed_edge": // distribution centered at p90 with ± 5% stddev. + mean = values[0] + (long) (range * 0.9); + stddev = (long) (range * 0.05); + for (int i = 0; i < queries.length; i++) { + queries[i] = Math.max(values[0], mean + (long) (random.nextGaussian() * stddev)); + } + break; + case "skewed_center": // distribution centered at p50 with ± 5% stddev. 
+ mean = values[0] + (long) (range * 0.5); + stddev = (long) (range * 0.05); + for (int i = 0; i < queries.length; i++) { + queries[i] = Math.max(values[0], mean + (long) (random.nextGaussian() * stddev)); + } + break; + default: + throw new IllegalArgumentException("invalid distribution: " + distribution); + } + + switch (type) { + case "binary": + supplier = () -> new Rounding.BinarySearchArrayRounding(values, size, null); + break; + case "linear": + supplier = () -> new Rounding.BidirectionalLinearSearchArrayRounding(values, size, null); + break; + default: + throw new IllegalArgumentException("invalid type: " + type); + } + } + + private static long nextPositiveLong(Random random) { + return random.nextLong() & Long.MAX_VALUE; + } + } +} diff --git a/benchmarks/src/main/java/org/opensearch/common/hash/HashFunctionBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/hash/HashFunctionBenchmark.java new file mode 100644 index 0000000000000..8842337a468a1 --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/common/hash/HashFunctionBenchmark.java @@ -0,0 +1,172 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.hash; + +import org.apache.lucene.util.StringHelper; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.Random; + +@Fork(value = 3) +@Warmup(iterations = 1, time = 1) +@Measurement(iterations = 3, time = 3) +@BenchmarkMode(Mode.Throughput) +public class HashFunctionBenchmark { + + @Benchmark + public void hash(Blackhole bh, Options opts) { + bh.consume(opts.type.hash(opts.data)); + } + + @State(Scope.Benchmark) + public static class Options { + @Param({ "MURMUR3", "T1HA1" }) + public Type type; + + @Param({ + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "12", + "14", + "16", + "18", + "21", + "24", + "28", + "32", + "36", + "41", + "47", + "54", + "62", + "71", + "81", + "90", + "100", + "112", + "125", + "139", + "156", + "174", + "194", + "220", + "245", + "272", + "302", + "339", + "384", + "431", + "488", + "547", + "608", + "675", + "763", + "863", + "967", + "1084", + "1225", + "1372", + "1537", + "1737", + "1929", + "2142", + "2378", + "2664", + "3011", + "3343", + "3778", + "4232", + "4783", + "5310", + "5895", + "6662", + "7529", + "8508", + "9444", + "10483", + "11741", + "13150", + "14597", + "16495", + "18475", + "20877", + "23383", + "25956", + "29071", + "32560", + "36142", + "40841", + "46151", + "52151", + "57888", + "65414", + "72610", + "82050", + "91076", + "102006", + "114247", + "127957", + "143312", + "159077", + "176576", + "199531", + "223475", + "250292", + "277825", + "313943", + "351617", + "393812" }) + public Integer length; + public byte[] data; + + @Setup + public void setup() { + data = new byte[length]; + new Random(0).nextBytes(data); + } + } + + public enum Type { + MURMUR3((data, offset, length) -> StringHelper.murmurhash3_x86_32(data, offset, length, 0)), + 
T1HA1((data, offset, length) -> T1ha1.hash(data, offset, length, 0)); + + private final Hasher hasher; + + Type(Hasher hasher) { + this.hasher = hasher; + } + + public long hash(byte[] data) { + return hasher.hash(data, 0, data.length); + } + } + + @FunctionalInterface + interface Hasher { + long hash(byte[] data, int offset, int length); + } +} diff --git a/benchmarks/src/main/java/org/opensearch/common/util/BytesRefHashBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/util/BytesRefHashBenchmark.java new file mode 100644 index 0000000000000..fef12b6d9f84a --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/common/util/BytesRefHashBenchmark.java @@ -0,0 +1,249 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.StringHelper; +import org.opensearch.common.hash.T1ha1; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.util.HashSet; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; +import java.util.stream.Stream; + +@Fork(value = 3) +@Warmup(iterations = 1, time = 2) +@Measurement(iterations = 3, time = 5) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +public class BytesRefHashBenchmark { + private static final int NUM_TABLES = 20; // run across many tables so that caches aren't effective + private static final int NUM_HITS = 1_000_000; // num hits per table + + @Benchmark + public void add(Blackhole bh, Options opts) { + HashTable[] tables = Stream.generate(opts.type::create).limit(NUM_TABLES).toArray(HashTable[]::new); + + for (int hit = 0; hit < NUM_HITS; hit++) { + BytesRef key = opts.keys[hit % opts.keys.length]; + for (HashTable table : tables) { + bh.consume(table.add(key)); + } + } + + Releasables.close(tables); + } + + @State(Scope.Benchmark) + public static class Options { + @Param({ "MURMUR3", "T1HA1" }) + public Type type; + + @Param({ + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "10", + "12", + "14", + "16", + "19", + "22", + "25", + "29", + "33", + "38", + "43", + "50", + "57", + "65", + "75", + "86", + "97", + "109", + "124", + "141", + "161", + "182", + "204", + "229", + "262", + "297", + "336", + "380", + "430", + "482", + "550", + "610", + "704", + "801", + "914", + "1042", + "1178", + "1343", + "1532", + "1716", + "1940", + "2173", + "2456", + "2751", + "3082", + "3514", + "4006", + "4487", + "5026", + "5730", + "6418", + "7317", + "8196", + "9180", + "10374", + "11723", + "13247", + "14837", + "16915", + "19114", + "21599", + "24623", + "28071", + "32001", + "36482", + "41590", + "46581", + "52637", + "58954", + "67208", + "76618", + "86579", + "97835", + "109576", + "122726", 
+ "138681", + "156710", + "175516", + "198334", + "222135", + "248792", + "281135", + "320494", + "365364", + "409208", + "466498", + "527143", + "595672", + "667153", + "753883", + "851888", + "971153" }) + public Integer size; + + @Param({ "5", "28", "59", "105" }) + public Integer length; + + private BytesRef[] keys; + + @Setup + public void setup() { + assert size <= Math.pow(26, length) : "key length too small to generate the required number of keys"; + // Seeding with size will help produce deterministic results for the same size, and avoid similar + // looking clusters for different sizes, in case one hash function got unlucky. + Random random = new Random(size); + Set seen = new HashSet<>(); + keys = new BytesRef[size]; + for (int i = 0; i < size; i++) { + BytesRef key; + do { + key = new BytesRef( + random.ints(97, 123) + .limit(length) + .collect(StringBuilder::new, StringBuilder::appendCodePoint, StringBuilder::append) + .toString() + ); + } while (seen.contains(key)); + keys[i] = key; + seen.add(key); + } + } + } + + public enum Type { + MURMUR3(() -> new HashTable() { + private final BytesRefHash table = new BytesRefHash(1, 0.6f, key -> { + // Repeating the lower bits into upper bits to make the fingerprint work. + // Alternatively, use a 64-bit murmur3 hash, but that won't represent the baseline. + long h = StringHelper.murmurhash3_x86_32(key.bytes, key.offset, key.length, 0) & 0xFFFFFFFFL; + return h | (h << 32); + }, BigArrays.NON_RECYCLING_INSTANCE); + + @Override + public long add(BytesRef key) { + return table.add(key); + } + + @Override + public void close() { + table.close(); + } + }), + + T1HA1(() -> new HashTable() { + private final BytesRefHash table = new BytesRefHash( + 1, + 0.6f, + key -> T1ha1.hash(key.bytes, key.offset, key.length, 0), + BigArrays.NON_RECYCLING_INSTANCE + ); + + @Override + public long add(BytesRef key) { + return table.add(key); + } + + @Override + public void close() { + table.close(); + } + }); + + private final Supplier supplier; + + Type(Supplier supplier) { + this.supplier = supplier; + } + + public HashTable create() { + return supplier.get(); + } + } + + interface HashTable extends Releasable { + long add(BytesRef key); + } +} diff --git a/build.gradle b/build.gradle index cecfb73b217b7..4b23a8d07dddd 100644 --- a/build.gradle +++ b/build.gradle @@ -493,6 +493,7 @@ subprojects { includeClasses.add("org.opensearch.index.reindex.DeleteByQueryBasicTests") includeClasses.add("org.opensearch.index.reindex.UpdateByQueryBasicTests") includeClasses.add("org.opensearch.index.shard.IndexShardIT") + includeClasses.add("org.opensearch.index.shard.RemoteIndexShardTests") includeClasses.add("org.opensearch.index.shard.RemoteStoreRefreshListenerTests") includeClasses.add("org.opensearch.index.translog.RemoteFSTranslogTests") includeClasses.add("org.opensearch.indices.DateMathIndexExpressionsIntegrationIT") diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 0e2ca8060587f..111ce6ef18d12 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -103,7 +103,7 @@ dependencies { api localGroovy() api 'commons-codec:commons-codec:1.16.0' - api 'org.apache.commons:commons-compress:1.23.0' + api 'org.apache.commons:commons-compress:1.24.0' api 'org.apache.ant:ant:1.10.13' api 'com.netflix.nebula:gradle-extra-configurations-plugin:10.0.0' api 'com.netflix.nebula:nebula-publishing-plugin:20.3.0' @@ -114,7 +114,7 @@ dependencies { api 'com.github.johnrengelman:shadow:8.1.1' api 'org.jdom:jdom2:2.0.6.1' api 
"org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}" - api 'de.thetaphi:forbiddenapis:3.5.1' + api 'de.thetaphi:forbiddenapis:3.6' api 'com.avast.gradle:gradle-docker-compose-plugin:0.16.11' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" api 'org.apache.maven:maven-model:3.9.4' @@ -128,11 +128,15 @@ dependencies { testFixturesApi "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}" testFixturesApi gradleApi() testFixturesApi gradleTestKit() - testImplementation 'com.github.tomakehurst:wiremock-jre8-standalone:2.32.0' + testImplementation 'org.wiremock:wiremock-standalone:3.1.0' testImplementation "org.mockito:mockito-core:${props.getProperty('mockito')}" integTestImplementation('org.spockframework:spock-core:2.3-groovy-3.0') { exclude module: "groovy" } + implementation('org.ajoberstar.grgit:grgit-core:5.2.0') { + exclude group: 'org.eclipse.jgit', module: 'org.eclipse.jgit' + } + implementation 'org.eclipse.jgit:org.eclipse.jgit:6.7.0.202309050840-r' } configurations.all { diff --git a/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java b/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java index c5b4de157c75c..662510fbbf61c 100644 --- a/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java +++ b/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java @@ -45,17 +45,16 @@ /** * A standalone process that will reap external services after a build dies. - * *
<h2>Input</h2>
* Since how to reap a given service is platform and service dependent, this tool * operates on system commands to execute. It takes a single argument, a directory * that will contain files with reaping commands. Each line in each file will be * executed with {@link Runtime#exec(String)}. - * + *
<p>
* The main method will wait indefinitely on the parent process (Gradle) by * reading from stdin. When Gradle shuts down, whether normally or abruptly, the * pipe will be broken and read will return. - * + *
<p>
* The reaper will then iterate over the files in the configured directory, * and execute the given commands. If any commands fail, a failure message is * written to stderr. Otherwise, the input file will be deleted. If no inputs diff --git a/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java b/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java index 3f65abcc25d17..4a229736c3fc6 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java @@ -52,15 +52,15 @@ /** * A container for opensearch supported version information used in BWC testing. - * + *
<p>
* Parse the Java source file containing the versions declarations and use the known rules to figure out which are all * the version the current one is wire and index compatible with. * On top of this, figure out which of these are unreleased and provide the branch they can be built from. - * + *
<p>
* Note that in this context, currentVersion is the unreleased version this build operates on. * At any point in time there will surely be four such unreleased versions being worked on, * thus currentVersion will be one of these. - * + *
<p>
* Considering: *
<dl>
* <dt>M, M &gt; 0</dt>
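As an illustrative aside, the version-declaration parsing that this javadoc describes can be sketched as a standalone snippet. This is a hedged approximation only, not the actual BwcVersions implementation: the V_x_y_z constant naming pattern and the single-file input are assumptions.

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Sketch: collect versions from declarations such as
//   public static final Version V_2_11_1 = new Version(...);
final class VersionScanSketch {
    // Assumed naming convention; the real parser may differ.
    private static final Pattern DECLARATION = Pattern.compile("\\bV_(\\d+)_(\\d+)_(\\d+)\\b");

    static List<String> declaredVersions(Path versionJava) throws Exception {
        List<String> versions = new ArrayList<>();
        for (String line : Files.readAllLines(versionJava)) {
            Matcher m = DECLARATION.matcher(line);
            while (m.find()) {
                versions.add(m.group(1) + "." + m.group(2) + "." + m.group(3));
            }
        }
        return versions;
    }
}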
@@ -84,7 +84,7 @@ * Each build is only concerned with versions before it, as those are the ones that need to be tested * for backwards compatibility. We never look forward, and don't add forward facing version number to branches of previous * version. - * + *
<p>
* Each branch has a current version, and expected compatible versions are parsed from the server code's Version` class. * We can reliably figure out which the unreleased versions are due to the convention of always adding the next unreleased * version number to server in all branches when a version is released. diff --git a/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java b/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java index 5ae7ad1595e2f..5259700b3a63d 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java @@ -38,7 +38,7 @@ /** * Writes data passed to this stream as log messages. - * + *
<p>
* The stream will be flushed whenever a newline is detected. * Allows setting an optional prefix before each line of output. */ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java index 159270d28e3d6..c6e49dc44d6bd 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java @@ -76,7 +76,7 @@ public InternalBwcGitPlugin(ProviderFactory providerFactory, ExecOperations exec public void apply(Project project) { this.project = project; this.gitExtension = project.getExtensions().create("bwcGitConfig", BwcGitExtension.class); - Provider remote = providerFactory.systemProperty("bwc.remote").forUseAtConfigurationTime().orElse("opensearch-project"); + Provider remote = providerFactory.systemProperty("bwc.remote").orElse("opensearch-project"); TaskContainer tasks = project.getTasks(); TaskProvider createCloneTaskProvider = tasks.register("createClone", LoggedExec.class, createClone -> { @@ -105,7 +105,6 @@ public void apply(Project project) { String remoteRepo = remote.get(); // for testing only we can override the base remote url String remoteRepoUrl = providerFactory.systemProperty("testRemoteRepo") - .forUseAtConfigurationTime() .getOrElse("https://github.com/" + remoteRepo + "/OpenSearch.git"); addRemote.setCommandLine(asList("git", "remote", "add", remoteRepo, remoteRepoUrl)); }); @@ -113,7 +112,6 @@ public void apply(Project project) { TaskProvider fetchLatestTaskProvider = tasks.register("fetchLatest", LoggedExec.class, fetchLatest -> { Provider gitFetchLatest = project.getProviders() .systemProperty("tests.bwc.git_fetch_latest") - .forUseAtConfigurationTime() .orElse("true") .map(fetchProp -> { if ("true".equals(fetchProp)) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionRule.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionRule.java index aa81ef75701fa..db46d2e3edc55 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionRule.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionRule.java @@ -40,7 +40,7 @@ /** * Represent rules for tests enforced by the @{link {@link TestingConventionsTasks}} - * + *
<p>
* Rules are identified by name, tests must have this name as a suffix and implement one of the base classes * and be part of all the specified tasks. */ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java b/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java index 1423b52c443d9..e82d8ed73ced2 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java @@ -61,7 +61,7 @@ /** * A custom archive task that assembles a tar archive that preserves symbolic links. - * + *
<p>
* This task is necessary because the built-in task {@link org.gradle.api.tasks.bundling.Tar} does not preserve symbolic links. */ public class SymbolicLinkPreservingTar extends Tar { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java index 728e36ce98bff..fcadf35593ce6 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/RestResourcesPlugin.java @@ -65,7 +65,7 @@ * Rest YAML tests :
* When the {@link RestResourcesPlugin} has been applied the {@link CopyRestTestsTask} will copy the Rest YAML tests if explicitly * configured with `includeCore` through the `restResources.restTests` extension. - * + *
<p>
* Additionally you can specify which sourceSetName resources should be copied to. The default is the yamlRestTest source set. * @see CopyRestApiTask * @see CopyRestTestsTask diff --git a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantMachine.java b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantMachine.java index 2d71b9361963b..7abf9bf5fbef6 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantMachine.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantMachine.java @@ -53,7 +53,7 @@ /** * An helper to manage a vagrant box. - * + *
<p>
* This is created alongside a {@link VagrantExtension} for a project to manage starting and * stopping a single vagrant box. */ @@ -185,7 +185,7 @@ public void setArgs(String... args) { /** * A function to translate output from the vagrant command execution to the progress line. - * + *
<p>
* The function takes the current line of output from vagrant, and returns a new * progress line, or {@code null} if there is no update. */ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java index 85d3e340c50e7..ca1b95183505f 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java @@ -47,7 +47,7 @@ /** * A shell script to run within a vagrant VM. - * + *
<p>
* The script is run as root within the VM. */ public abstract class VagrantShellTask extends DefaultTask { diff --git a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java index 5c26472cbd33e..844c9ccd2f8e9 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/pluginzip/PublishTests.java @@ -58,7 +58,7 @@ public void tearDown() { * This test is used to verify that adding the 'opensearch.pluginzip' to the project * adds some other transitive plugins and tasks under the hood. This is basically * a behavioral test of the {@link Publish#apply(Project)} method. - * + *
<p>
* This is equivalent of having a build.gradle script with just the following section: *
<pre>
      *     plugins {
@@ -233,7 +233,7 @@ public void useDefaultValues() throws IOException, URISyntaxException, XmlPullPa
         GradleRunner runner = prepareGradleRunnerFromTemplate("useDefaultValues.gradle", "build", ZIP_PUBLISH_TASK);
         BuildResult result = runner.build();
 
-        /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+        /* Check if build and ZIP_PUBLISH_TASK tasks have run well */
         assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
         assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
 
@@ -308,7 +308,7 @@ public void allProjectsGroup() throws IOException, URISyntaxException, XmlPullPa
         GradleRunner runner = prepareGradleRunnerFromTemplate("allProjectsGroup.gradle", "build", ZIP_PUBLISH_TASK);
         BuildResult result = runner.build();
 
-        /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+        /* Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
         assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
         assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
 
@@ -343,7 +343,7 @@ public void groupPriorityLevel() throws IOException, URISyntaxException, XmlPull
         GradleRunner runner = prepareGradleRunnerFromTemplate("groupPriorityLevel.gradle", "build", ZIP_PUBLISH_TASK);
         BuildResult result = runner.build();
 
-        /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+        /* Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
         assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
         assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
 
@@ -379,7 +379,7 @@ public void missingPOMEntity() throws IOException, URISyntaxException, XmlPullPa
         GradleRunner runner = prepareGradleRunnerFromTemplate("missingPOMEntity.gradle", "build", ZIP_PUBLISH_TASK);
         BuildResult result = runner.build();
 
-        /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+        /* Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
         assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
         assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
 
@@ -426,7 +426,7 @@ public void customizedGroupValue() throws IOException, URISyntaxException, XmlPu
         GradleRunner runner = prepareGradleRunnerFromTemplate("customizedGroupValue.gradle", "build", ZIP_PUBLISH_TASK);
         BuildResult result = runner.build();
 
-        /** Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
+        /* Check if build and {@value ZIP_PUBLISH_TASK} tasks have run well */
         assertEquals(SUCCESS, result.task(":" + "build").getOutcome());
         assertEquals(SUCCESS, result.task(":" + ZIP_PUBLISH_TASK).getOutcome());
 
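The PublishTests hunks above all apply the same fix: a /** ... */ comment inside a method body is a "dangling" javadoc comment, and tags like {@value} are only resolved when the comment documents a declaration, so each one is demoted to a plain block comment. A minimal sketch of the pattern, with illustrative class and field names rather than the test's real ones:

public class DanglingJavadocExample {
    static final String ZIP_PUBLISH_TASK = "publishPluginZipPublicationToZipStagingRepository";

    void check() {
        // Javadoc-style comments are only processed when attached to a declaration
        // (class, field, method). Placed before a plain statement they are "dangling",
        // and {@value ZIP_PUBLISH_TASK} would never be expanded, so linters flag them.
        /* Check if build and ZIP_PUBLISH_TASK tasks have run well */
        System.out.println(ZIP_PUBLISH_TASK);
    }
}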
diff --git a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java
index b64c719440733..def5248c1f255 100644
--- a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java
+++ b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java
@@ -36,7 +36,7 @@
 
 /**
  * Filter out threads controlled by gradle that may be created during unit tests.
- *
+ * <p>
* Currently this includes pooled threads for Exec as well as file system event watcher threads. */ public class GradleThreadsFilter implements ThreadFilter { diff --git a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java index 163a903d31832..1a2e36aa78e9f 100644 --- a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java +++ b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/JUnit3MethodProvider.java @@ -43,7 +43,7 @@ /** * Backwards compatible test* method provider (public, non-static). - * + *
<p>
* copy of org.apache.lucene.util.LuceneJUnit3MethodProvider to avoid a dependency between build and test fw. */ public final class JUnit3MethodProvider implements TestMethodProvider { diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle index cb8050d1718c4..dca2bce94ea6d 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle @@ -15,8 +15,9 @@ plugins { repositories { mavenCentral() } + dependencies { - implementation "org.apache.logging.log4j:log4j-core:2.20.0" + implementation "org.apache.logging.log4j:log4j-core:2.21.1" } ["0.0.1", "0.0.2"].forEach { v -> diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 0f42c9a40cb69..98c64ffb29b35 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ -opensearch = 2.10.0 -lucene = 9.7.0 +opensearch = 2.12.0 +lucene = 9.8.0 bundled_jdk_vendor = adoptium bundled_jdk = 17.0.8+7 @@ -12,9 +12,9 @@ jackson_databind = 2.15.2 snakeyaml = 2.1 icu4j = 70.1 supercsv = 2.4.0 -log4j = 2.20.0 +log4j = 2.21.0 slf4j = 1.7.36 -asm = 9.5 +asm = 9.6 jettison = 1.5.4 woodstox = 6.4.0 kotlin = 1.7.10 @@ -24,9 +24,9 @@ protobuf = 3.22.3 jakarta_annotation = 1.3.5 # when updating the JNA version, also update the version in buildSrc/build.gradle -jna = 5.5.0 +jna = 5.13.0 -netty = 4.1.96.Final +netty = 4.1.100.Final joda = 2.12.2 # client dependencies @@ -35,7 +35,8 @@ httpcore = 4.4.16 httpasyncclient = 4.1.5 commonslogging = 1.2 commonscodec = 1.15 - +commonslang = 3.13.0 +commonscompress = 1.24.0 # plugin dependencies aws = 2.20.55 reactivestreams = 1.0.4 @@ -43,14 +44,14 @@ reactivestreams = 1.0.4 # when updating this version, you need to ensure compatibility with: # - plugins/ingest-attachment (transitive dependency, check the upstream POM) # - distribution/tools/plugin-cli -bouncycastle=1.75 +bouncycastle=1.76 # test dependencies randomizedrunner = 2.7.1 junit = 4.13.2 hamcrest = 2.1 -mockito = 5.4.0 +mockito = 5.5.0 objenesis = 3.2 -bytebuddy = 1.14.3 +bytebuddy = 1.14.7 # benchmark dependencies jmh = 1.35 @@ -63,5 +64,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.26.0 - +opentelemetry = 1.31.0 +opentelemetrysemconv = 1.21.0-alpha diff --git a/client/benchmark/src/main/java/org/opensearch/client/benchmark/metrics/SampleRecorder.java b/client/benchmark/src/main/java/org/opensearch/client/benchmark/metrics/SampleRecorder.java index e53e4f1ad692d..9cd12f5e78bd0 100644 --- a/client/benchmark/src/main/java/org/opensearch/client/benchmark/metrics/SampleRecorder.java +++ b/client/benchmark/src/main/java/org/opensearch/client/benchmark/metrics/SampleRecorder.java @@ -37,7 +37,7 @@ /** * Stores measurement samples. - * + *
<p>
* This class is NOT threadsafe. */ public final class SampleRecorder { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java index 5bd5a5d0e308e..eb0a8b0e8f40a 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java @@ -170,8 +170,8 @@ public ClusterHealthResponse health(ClusterHealthRequest healthRequest, RequestO /** * Asynchronously get cluster health using the Cluster Health API. - * * If timeout occurred, {@link ClusterHealthResponse} will have isTimedOut() == true and status() == RestStatus.REQUEST_TIMEOUT + * * @param healthRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index 0dd74e663ba71..80f878d25c859 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -451,9 +451,9 @@ static void addSearchRequestParams(Params params, SearchRequest searchRequest) { params.withIndicesOptions(searchRequest.indicesOptions()); } params.withSearchType(searchRequest.searchType().name().toLowerCase(Locale.ROOT)); - /** - * Merging search responses as part of CCS flow to reduce roundtrips is not supported for point in time - - * refer to org.opensearch.action.search.SearchResponseMerger + /* + Merging search responses as part of CCS flow to reduce roundtrips is not supported for point in time - + refer to org.opensearch.action.search.SearchResponseMerger */ if (searchRequest.pointInTimeBuilder() != null) { params.putParam("ccs_minimize_roundtrips", "false"); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java index dad5b6a3679ec..d40445b2daa81 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java @@ -37,7 +37,7 @@ /** * A base request for any requests that supply timeouts. - * + *
<p>
* Please note, any requests that use a ackTimeout should set timeout as they * represent the same backing field on the server. */ diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java index 7805a7853b003..62c5b54c0e75e 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java @@ -156,7 +156,7 @@ public MediaType mappingsMediaType() { /** * Adds mapping that will be added when the index gets created. - * + *
<p>
* Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -168,7 +168,7 @@ public CreateIndexRequest mapping(String source, MediaType mediaType) { /** * Adds mapping that will be added when the index gets created. - * + *
<p>
* Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -179,7 +179,7 @@ public CreateIndexRequest mapping(XContentBuilder source) { /** * Adds mapping that will be added when the index gets created. - * + *
<p>
* Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -196,7 +196,7 @@ public CreateIndexRequest mapping(Map source) { /** * Adds mapping that will be added when the index gets created. - * + *
<p>
* Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -282,7 +282,7 @@ public CreateIndexRequest aliases(Collection aliases) { /** * Sets the settings and mappings as a single source. - * + *
<p>
* Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(String source, MediaType mediaType) { @@ -291,7 +291,7 @@ public CreateIndexRequest source(String source, MediaType mediaType) { /** * Sets the settings and mappings as a single source. - * + *
<p>
* Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(XContentBuilder source) { @@ -300,7 +300,7 @@ public CreateIndexRequest source(XContentBuilder source) { /** * Sets the settings and mappings as a single source. - * + *
<p>
* Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(BytesReference source, MediaType mediaType) { @@ -311,7 +311,7 @@ public CreateIndexRequest source(BytesReference source, MediaType mediaType) { /** * Sets the settings and mappings as a single source. - * + *
<p>
* Note that the mapping definition should *not* be nested under a type name. */ @SuppressWarnings("unchecked") diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java index 6d7e95d191ba6..a63393bd2341b 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java @@ -105,7 +105,7 @@ public MediaType mediaType() { /** * The mapping source definition. - * + *
<p>
* Note that the definition should *not* be nested under a type name. */ public PutMappingRequest source(Map mappingSource) { @@ -120,7 +120,7 @@ public PutMappingRequest source(Map mappingSource) { /** * The mapping source definition. - * + *
<p>
* Note that the definition should *not* be nested under a type name. */ public PutMappingRequest source(String mappingSource, MediaType mediaType) { @@ -131,7 +131,7 @@ public PutMappingRequest source(String mappingSource, MediaType mediaType) { /** * The mapping source definition. - * + *
<p>
* Note that the definition should *not* be nested under a type name. */ public PutMappingRequest source(XContentBuilder builder) { @@ -142,7 +142,7 @@ public PutMappingRequest source(XContentBuilder builder) { /** * The mapping source definition. - * + *
<p>
* Note that the definition should *not* be nested under a type name. */ public PutMappingRequest source(BytesReference source, MediaType mediaType) { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskGroup.java b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskGroup.java index c419884700587..9129de717459f 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskGroup.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskGroup.java @@ -38,7 +38,6 @@ /** * Client side counterpart of server side version. - * * {@link org.opensearch.action.admin.cluster.node.tasks.list.TaskGroup} */ public class TaskGroup { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java index 51ac62830446f..75badc4e3dbf2 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/tasks/TaskInfo.java @@ -54,6 +54,7 @@ public class TaskInfo { private long runningTimeNanos; private boolean cancellable; private boolean cancelled; + private Long cancellationStartTime; private TaskId parentTaskId; private final Map status = new HashMap<>(); private final Map headers = new HashMap<>(); @@ -127,6 +128,14 @@ void setCancelled(boolean cancelled) { this.cancelled = cancelled; } + public Long getCancellationStartTime() { + return this.cancellationStartTime; + } + + public void setCancellationStartTime(Long cancellationStartTime) { + this.cancellationStartTime = cancellationStartTime; + } + public TaskId getParentTaskId() { return parentTaskId; } @@ -180,6 +189,7 @@ private void noOpParse(Object s) {} parser.declareString(TaskInfo::setParentTaskId, new ParseField("parent_task_id")); parser.declareObject(TaskInfo::setHeaders, (p, c) -> p.mapStrings(), new ParseField("headers")); parser.declareObject(TaskInfo::setResourceStats, (p, c) -> p.map(), new ParseField("resource_stats")); + parser.declareLong(TaskInfo::setCancellationStartTime, new ParseField("cancellation_time_millis")); PARSER = (XContentParser p, Void v, String name) -> parser.parse(p, new TaskInfo(new TaskId(name)), null); } @@ -199,7 +209,8 @@ && isCancelled() == taskInfo.isCancelled() && Objects.equals(getParentTaskId(), taskInfo.getParentTaskId()) && Objects.equals(status, taskInfo.status) && Objects.equals(getHeaders(), taskInfo.getHeaders()) - && Objects.equals(getResourceStats(), taskInfo.getResourceStats()); + && Objects.equals(getResourceStats(), taskInfo.getResourceStats()) + && Objects.equals(getCancellationStartTime(), taskInfo.cancellationStartTime); } @Override @@ -216,7 +227,8 @@ public int hashCode() { getParentTaskId(), status, getHeaders(), - getResourceStats() + getResourceStats(), + getCancellationStartTime() ); } @@ -250,6 +262,8 @@ public String toString() { + headers + ", resource_stats=" + resourceStats + + ", cancellationStartTime=" + + cancellationStartTime + '}'; } } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/AbstractRequestTestCase.java b/client/rest-high-level/src/test/java/org/opensearch/client/AbstractRequestTestCase.java index 49bcb61b2dc3d..c464ee9ece74a 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/AbstractRequestTestCase.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/AbstractRequestTestCase.java @@ -44,7 +44,7 @@ /** * Base class for HLRC request 
parsing tests. - * + *
<p>
* This case class facilitates generating client side request test instances and * verifies that they are correctly parsed into server side request instances. * diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/AbstractResponseTestCase.java b/client/rest-high-level/src/test/java/org/opensearch/client/AbstractResponseTestCase.java index 27704b01560c4..7d2d6b87b85c6 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/AbstractResponseTestCase.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/AbstractResponseTestCase.java @@ -44,7 +44,7 @@ /** * Base class for HLRC response parsing tests. - * + *
<p>
* This case class facilitates generating server side response test instances and * verifies that they are correctly parsed into HLRC response instances. * diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorRetryIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorRetryIT.java index b7f6328b3c88e..3678cc042ba47 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorRetryIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/BulkProcessorRetryIT.java @@ -180,7 +180,7 @@ private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) { /** * Internal helper class to correlate backoff states with bulk responses. This is needed to check whether we maxed out the number * of retries but still got rejected (which is perfectly fine and can also happen from time to time under heavy load). - * + *
<p>
* This implementation relies on an implementation detail in Retry, namely that the bulk listener is notified on the same thread * as the last call to the backoff policy's iterator. The advantage is that this is non-invasive to the rest of the production code. */ diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java index 5ec1da77a6795..6e4b83d2420fa 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -24,7 +24,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -72,7 +71,7 @@ public void testCreateAndDeletePit() throws IOException { assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(createPitResponse.getId())); } - public void testDeleteAllAndListAllPits() throws IOException, InterruptedException { + public void testDeleteAllAndListAllPits() throws Exception { CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "index"); CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); CreatePitResponse pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); @@ -91,11 +90,9 @@ public void testDeleteAllAndListAllPits() throws IOException, InterruptedExcepti List pits = getAllPitResponse.getPitInfos().stream().map(r -> r.getPitId()).collect(Collectors.toList()); assertTrue(pits.contains(pitResponse.getId())); assertTrue(pits.contains(pitResponse1.getId())); - CountDownLatch countDownLatch = new CountDownLatch(1); ActionListener deletePitListener = new ActionListener<>() { @Override public void onResponse(DeletePitResponse response) { - countDownLatch.countDown(); for (DeletePitInfo deletePitInfo : response.getDeletePitResults()) { assertTrue(deletePitInfo.isSuccessful()); } @@ -103,19 +100,20 @@ public void onResponse(DeletePitResponse response) { @Override public void onFailure(Exception e) { - countDownLatch.countDown(); if (!(e instanceof OpenSearchStatusException)) { throw new AssertionError("Delete all failed"); } } }; final CreatePitResponse pitResponse3 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); - + assertTrue(pitResponse3.getId() != null); ActionListener getPitsListener = new ActionListener() { @Override public void onResponse(GetAllPitNodesResponse response) { List pits = response.getPitInfos().stream().map(r -> r.getPitId()).collect(Collectors.toList()); assertTrue(pits.contains(pitResponse3.getId())); + // delete all pits + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); } @Override @@ -126,11 +124,12 @@ public void onFailure(Exception e) { } }; highLevelClient().getAllPitsAsync(RequestOptions.DEFAULT, getPitsListener); - highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); + // validate no pits case - getAllPitResponse = highLevelClient().getAllPits(RequestOptions.DEFAULT); - assertTrue(getAllPitResponse.getPitInfos().size() == 0); - highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + assertBusy(() -> { + GetAllPitNodesResponse getAllPitResponse1 = 
highLevelClient().getAllPits(RequestOptions.DEFAULT); + assertTrue(getAllPitResponse1.getPitInfos().size() == 0); + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + }); } } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java index abb2d75aea751..ce080b45273b4 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java @@ -137,15 +137,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[example] * -------------------------------------------------- - * + *
<p>
* The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java index d14759065b5eb..28909cf58541a 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java @@ -65,15 +65,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/IngestClientDocumentationIT.java[example] * -------------------------------------------------- - * + *
<p>
* The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java index 50bcf79642eac..d0015db044843 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java @@ -90,15 +90,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[example] * -------------------------------------------------- - * + *
<p>
* The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java index 6916ae11556e2..2e2d15df5392a 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java @@ -66,15 +66,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[example] * -------------------------------------------------- - * + *
<p>
* The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/TasksClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/TasksClientDocumentationIT.java index 03e267aafd1b7..cbac0b8c97d9c 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/TasksClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/TasksClientDocumentationIT.java @@ -66,15 +66,15 @@ * You need to wrap your code between two tags like: * // tag::example * // end::example - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/{@link TasksClientDocumentationIT}.java[example] * -------------------------------------------------- - * + *
<p>
* The column width of the code block is 84. If the code contains a line longer * than 84, the line will be cut and a horizontal scroll bar will be displayed. * (the code indentation of the tag is not included in the width) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/RandomCreateIndexGenerator.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/RandomCreateIndexGenerator.java index 1f747dc139d15..edb4d16c6d992 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/RandomCreateIndexGenerator.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/RandomCreateIndexGenerator.java @@ -44,7 +44,7 @@ public class RandomCreateIndexGenerator { /** * Returns a random {@link CreateIndexRequest}. - * + *
<p>
* Randomizes the index name, the aliases, mappings and settings associated with the * index. When present, the mappings make no mention of types. */ diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java index 835a93b5b09ce..faf5024d0c173 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java @@ -84,6 +84,10 @@ protected CancelTasksResponseTests.ByNodeCancelTasksResponse createServerTestIns for (int i = 0; i < 4; i++) { boolean cancellable = randomBoolean(); boolean cancelled = cancellable == true ? randomBoolean() : false; + Long cancellationStartTime = null; + if (cancelled) { + cancellationStartTime = randomNonNegativeLong(); + } tasks.add( new org.opensearch.tasks.TaskInfo( new TaskId(NODE_ID, (long) i), @@ -97,7 +101,8 @@ protected CancelTasksResponseTests.ByNodeCancelTasksResponse createServerTestIns cancelled, new TaskId("node1", randomLong()), Collections.singletonMap("x-header-of", "some-value"), - null + null, + cancellationStartTime ) ); } @@ -135,6 +140,7 @@ protected void assertInstances( assertEquals(ti.isCancelled(), taskInfo.isCancelled()); assertEquals(ti.getParentTaskId().getNodeId(), taskInfo.getParentTaskId().getNodeId()); assertEquals(ti.getParentTaskId().getId(), taskInfo.getParentTaskId().getId()); + assertEquals(ti.getCancellationStartTime(), taskInfo.getCancellationStartTime()); FakeTaskStatus status = (FakeTaskStatus) ti.getStatus(); assertEquals(status.code, taskInfo.getStatus().get("code")); assertEquals(status.status, taskInfo.getStatus().get("status")); diff --git a/client/rest/src/main/java/org/opensearch/client/RestClient.java b/client/rest/src/main/java/org/opensearch/client/RestClient.java index 946b06e46d6fc..210638e60c0f5 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClient.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClient.java @@ -301,7 +301,7 @@ public boolean isRunning() { * they will be retried). In case of failures all of the alive nodes (or * dead nodes that deserve a retry) are retried until one responds or none * of them does, in which case an {@link IOException} will be thrown. - * + *
<p>
* This method works by performing an asynchronous call and waiting * for the result. If the asynchronous call throws an exception we wrap * it and rethrow it so that the stack trace attached to the exception diff --git a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java index ae4b8f4f20a4d..ff7aaffb6f218 100644 --- a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java @@ -81,15 +81,15 @@ * You need to wrap your code between two tags like: * // tag::example[] * // end::example[] - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/RestClientDocumentation.java[example] * -------------------------------------------------- - * + *
<p>
* Note that this is not a test class as we are only interested in testing that docs snippets compile. We don't want * to send requests to a node and we don't even have the tools to do it. */ diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java index e337e8930ad2a..6cdf8900c2434 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java @@ -49,15 +49,15 @@ * You need to wrap your code between two tags like: * // tag::example[] * // end::example[] - * + *
<p>
* Where example is your tag name. - * + *
<p>
* Then in the documentation, you can extract what is between tag and end tags with * ["source","java",subs="attributes,callouts,macros"] * -------------------------------------------------- * include-tagged::{doc-tests}/SnifferDocumentation.java[example] * -------------------------------------------------- - * + *
<p>
* Note that this is not a test class as we are only interested in testing that docs snippets compile. We don't want * to send requests to a node and we don't even have the tools to do it. */ diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 8ccb6d398bac7..394d47639fb0a 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -213,7 +213,7 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { configurationFile '/etc/opensearch/jvm.options' configurationFile '/etc/opensearch/log4j2.properties' from("${packagingFiles}") { - dirMode 02750 + dirMode 0750 into('/etc') permissionGroup 'opensearch' includeEmptyDirs true @@ -223,7 +223,7 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { } from("${packagingFiles}/etc/opensearch") { into('/etc/opensearch') - dirMode 02750 + dirMode 0750 fileMode 0660 permissionGroup 'opensearch' includeEmptyDirs true @@ -281,8 +281,8 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { dirMode mode } } - copyEmptyDir('/var/log/opensearch', 'opensearch', 'opensearch', 02750) - copyEmptyDir('/var/lib/opensearch', 'opensearch', 'opensearch', 02750) + copyEmptyDir('/var/log/opensearch', 'opensearch', 'opensearch', 0750) + copyEmptyDir('/var/lib/opensearch', 'opensearch', 'opensearch', 0750) copyEmptyDir('/usr/share/opensearch/plugins', 'root', 'root', 0755) into '/usr/share/opensearch' diff --git a/distribution/packages/src/deb/lintian/opensearch b/distribution/packages/src/deb/lintian/opensearch index 854b23131ecbc..e6db8e8c6b322 100644 --- a/distribution/packages/src/deb/lintian/opensearch +++ b/distribution/packages/src/deb/lintian/opensearch @@ -15,11 +15,11 @@ missing-dep-on-jarwrapper # we prefer to not make our config and log files world readable non-standard-file-perm etc/default/opensearch 0660 != 0644 -non-standard-dir-perm etc/opensearch/ 2750 != 0755 -non-standard-dir-perm etc/opensearch/jvm.options.d/ 2750 != 0755 +non-standard-dir-perm etc/opensearch/ 0750 != 0755 +non-standard-dir-perm etc/opensearch/jvm.options.d/ 0750 != 0755 non-standard-file-perm etc/opensearch/* -non-standard-dir-perm var/lib/opensearch/ 2750 != 0755 -non-standard-dir-perm var/log/opensearch/ 2750 != 0755 +non-standard-dir-perm var/lib/opensearch/ 0750 != 0755 +non-standard-dir-perm var/log/opensearch/ 0750 != 0755 executable-is-not-world-readable etc/init.d/opensearch 0750 non-standard-file-permissions-for-etc-init.d-script etc/init.d/opensearch 0750 != 0755 diff --git a/distribution/src/config/opensearch.yml b/distribution/src/config/opensearch.yml index 3c4fe822005e0..b7ab2e1c2309b 100644 --- a/distribution/src/config/opensearch.yml +++ b/distribution/src/config/opensearch.yml @@ -92,24 +92,16 @@ ${path.logs} # cluster.remote_store.enabled: true # # Repository to use for segment upload while enforcing remote store for an index -# cluster.remote_store.segment.repository: my-repo-1 +# node.attr.remote_store.segment.repository: my-repo-1 # # Repository to use for translog upload while enforcing remote store for an index -# cluster.remote_store.translog.repository: my-repo-1 +# node.attr.remote_store.translog.repository: my-repo-1 # # ---------------------------------- Experimental Features ----------------------------------- -# # Gates the visibility of the experimental segment replication features until they are production ready. 
# #opensearch.experimental.feature.segment_replication_experimental.enabled: false # -# -# Gates the visibility of the index setting that allows persisting data to remote store along with local disk. -# Once the feature is ready for production release, this feature flag can be removed. -# -#opensearch.experimental.feature.remote_store.enabled: false -# -# # Gates the functionality of a new parameter to the snapshot restore API # that allows for creation of a new index type that searches a snapshot # directly in a remote repository without restoring all index data to disk @@ -129,3 +121,9 @@ ${path.logs} # index searcher threadpool. # #opensearch.experimental.feature.concurrent_segment_search.enabled: false +# +# +# Gates the optimization of datetime formatters caching along with change in default datetime formatter +# Once there is no observed impact on performance, this feature flag can be removed. +# +#opensearch.experimental.optimization.datetime_formatter_caching.enabled: false diff --git a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java index fc613ccdaae68..62b2ded100cda 100644 --- a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java +++ b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java @@ -92,15 +92,15 @@ private static String maybeShowCodeDetailsInExceptionMessages() { } private static String javaLocaleProviders() { - /** - * SPI setting is used to allow loading custom CalendarDataProvider - * in jdk8 it has to be loaded from jre/lib/ext, - * in jdk9+ it is already within ES project and on a classpath - * - * Due to internationalization enhancements in JDK 9 OpenSearch need to set the provider to COMPAT otherwise time/date - * parsing will break in an incompatible way for some date patterns and locales. - * //TODO COMPAT will be deprecated in at some point, see please https://bugs.openjdk.java.net/browse/JDK-8232906 - * See also: documentation in server/org.opensearch.common.time.IsoCalendarDataProvider + /* + SPI setting is used to allow loading custom CalendarDataProvider + in jdk8 it has to be loaded from jre/lib/ext, + in jdk9+ it is already within ES project and on a classpath + + Due to internationalization enhancements in JDK 9 OpenSearch need to set the provider to COMPAT otherwise time/date + parsing will break in an incompatible way for some date patterns and locales. 
+ //TODO COMPAT will be deprecated in at some point, see please https://bugs.openjdk.java.net/browse/JDK-8232906 + See also: documentation in server/org.opensearch.common.time.IsoCalendarDataProvider */ return "-Djava.locale.providers=SPI,COMPAT"; } diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 5103999428814..b61a00aba04bc 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -38,14 +38,14 @@ dependencies { compileOnly project(":server") compileOnly project(":libs:opensearch-cli") api "org.bouncycastle:bcpg-fips:1.0.7.1" - api "org.bouncycastle:bc-fips:1.0.2.3" + api "org.bouncycastle:bc-fips:1.0.2.4" testImplementation project(":test:framework") testImplementation 'com.google.jimfs:jimfs:1.3.0' testRuntimeOnly("com.google.guava:guava:${versions.guava}") { transitive = false } - implementation 'org.apache.commons:commons-compress:1.23.0' + implementation "org.apache.commons:commons-compress:${versions.commonscompress}" } tasks.named("dependencyLicenses").configure { diff --git a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.3.jar.sha1 b/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.3.jar.sha1 deleted file mode 100644 index c71320050b7de..0000000000000 --- a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da62b32cb72591f5b4d322e6ab0ce7de3247b534 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.4.jar.sha1 b/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.4.jar.sha1 new file mode 100644 index 0000000000000..da37449f80d7e --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bc-fips-1.0.2.4.jar.sha1 @@ -0,0 +1 @@ +9008d04fc13da6455e6a792935b93b629757335d \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/commons-compress-1.23.0.jar.sha1 b/distribution/tools/plugin-cli/licenses/commons-compress-1.23.0.jar.sha1 deleted file mode 100644 index 48dba88409c17..0000000000000 --- a/distribution/tools/plugin-cli/licenses/commons-compress-1.23.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4af2060ea9b0c8b74f1854c6cafe4d43cfc161fc \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/commons-compress-1.24.0.jar.sha1 b/distribution/tools/plugin-cli/licenses/commons-compress-1.24.0.jar.sha1 new file mode 100644 index 0000000000000..23999d1bfbde4 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/commons-compress-1.24.0.jar.sha1 @@ -0,0 +1 @@ +b4b1b5a3d9573b2970fddab236102c0a4d27d35e \ No newline at end of file diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java index c222b82dfa480..60141968be48e 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java @@ -107,7 +107,7 @@ /** * A command for the plugin cli to install a plugin into opensearch. - * + *
<p>
* The install command takes a plugin id, which may be any of the following:
 * <ul>
 * <li>An official opensearch plugin name</li>
@@ -411,7 +411,7 @@ private String getMavenUrl(Terminal terminal, String[] coordinates, String platf /** * Returns {@code true} if the given url exists, and {@code false} otherwise. - * + *
<p>
* The given url must be {@code https} and existing means a {@code HEAD} request returns 200. */ // pkg private for tests to manipulate @@ -698,7 +698,6 @@ InputStream getPublicKey() { /** * Creates a URL and opens a connection. - * * If the URL returns a 404, {@code null} is returned, otherwise the open URL object is returned. */ // pkg private for tests diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java index 579f676631a5a..02be3dbc82a44 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java @@ -41,7 +41,7 @@ * The listener is triggered whenever a full percent is increased * The listener is never triggered twice on the same percentage * The listener will always return 99 percent, if the expectedTotalSize is exceeded, until it is finished - * + *
<p>
* Only used by the InstallPluginCommand, thus package private here */ abstract class ProgressInputStream extends FilterInputStream { diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java index b7dcbd50cf781..708f644bcdeb6 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java @@ -17,7 +17,7 @@ * An interface for an upgrade task, which in this instance is a unit of * operation that is part of the overall upgrade process. This extends the * {@link java.util.function.Consumer} interface. - * + *
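A sketch of the ProgressInputStream contract quoted above, under the stated behavior (one callback per whole percent, never twice for the same value, 99 while the expected size is exceeded, 100 only at end of stream). The names PercentReportingStream and onProgress are illustrative assumptions, not the actual implementation; a complete version would also override the bulk read(byte[], int, int) path.

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

abstract class PercentReportingStream extends FilterInputStream {
    private final long expectedTotalSize;
    private long count;
    private int lastPercent = -1;

    PercentReportingStream(InputStream in, long expectedTotalSize) {
        super(in);
        this.expectedTotalSize = expectedTotalSize;
    }

    @Override
    public int read() throws IOException {
        int b = super.read();
        if (b == -1) {
            if (lastPercent < 100) {
                lastPercent = 100;
                onProgress(100); // finished
            }
        } else {
            count++;
            // Capped at 99 while expectedTotalSize is exceeded (or unknown).
            int percent = expectedTotalSize <= 0 ? 99 : (int) Math.min(99, 100 * count / expectedTotalSize);
            if (percent > lastPercent) { // never fired twice for the same value
                lastPercent = percent;
                onProgress(percent);
            }
        }
        return b;
    }

    abstract void onProgress(int percent); // e.g. print a progress bar tick
}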
<p>
* The implementing tasks consume an instance of a tuple of {@link TaskInput} * and {@link Terminal} and operate via side effects. * diff --git a/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java b/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java index e6122e7baf91a..e1ad55fe4b60b 100644 --- a/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java +++ b/doc-tools/missing-doclet/src/main/java/org/opensearch/missingdoclet/MissingDoclet.java @@ -45,7 +45,7 @@ * It isn't recursive, just ignores exactly the elements you tell it. * Has option --missing-method to apply "method" level to selected packages (fix one at a time). * Matches package names exactly: so you'll need to list subpackages separately. - * + *
<p>
    * Note: This by default ignores javadoc validation on overridden methods. */ // Original version of this class is ported from MissingDoclet code in Lucene, diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle index ab2eddf16eacf..a9e64a1a93da5 100644 --- a/gradle/missing-javadoc.gradle +++ b/gradle/missing-javadoc.gradle @@ -159,6 +159,7 @@ configure([ project(":plugins:repository-s3"), project(":plugins:store-smb"), project(":plugins:transport-nio"), + project(":plugins:crypto-kms"), project(":qa:die-with-dignity"), project(":qa:os"), project(":qa:wildfly"), diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index f01f0a84a786a..adfb521550eb9 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.3-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.4-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=bb09982fdf52718e4c7b25023d10df6d35a5fff969860bdf5a5bd27a3ab27a9e +distributionSha256Sum=f2b9ed0faf8472cbe469255ae6c86eddb77076c75191741b4a462f33128dd419 diff --git a/gradlew b/gradlew index 0adc8e1a53214..1aa94a4269074 100755 --- a/gradlew +++ b/gradlew @@ -145,7 +145,7 @@ if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then case $MAX_FD in #( max*) # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. - # shellcheck disable=SC3045 + # shellcheck disable=SC2039,SC3045 MAX_FD=$( ulimit -H -n ) || warn "Could not query maximum file descriptor limit" esac @@ -153,7 +153,7 @@ if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then '' | soft) :;; #( *) # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. - # shellcheck disable=SC3045 + # shellcheck disable=SC2039,SC3045 ulimit -n "$MAX_FD" || warn "Could not set maximum file descriptor limit to $MAX_FD" esac @@ -202,11 +202,11 @@ fi # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' -# Collect all arguments for the java command; -# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of -# shell script including quotes and variable substitutions, so put them in -# double quotes to make sure that they get re-expanded; and -# * put everything else in single quotes, so that it's not re-expanded. +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. set -- \ "-Dorg.gradle.appname=$APP_BASE_NAME" \ diff --git a/libs/cli/src/main/java/org/opensearch/cli/Command.java b/libs/cli/src/main/java/org/opensearch/cli/Command.java index eed5c4ba4ee6f..cc9230bdb2282 100644 --- a/libs/cli/src/main/java/org/opensearch/cli/Command.java +++ b/libs/cli/src/main/java/org/opensearch/cli/Command.java @@ -162,7 +162,7 @@ protected static void exit(int status) { /** * Executes this command. - * + *
<p>
    * Any runtime user errors (like an input file that does not exist), should throw a {@link UserException}. */ protected abstract void execute(Terminal terminal, OptionSet options) throws Exception; diff --git a/libs/cli/src/main/java/org/opensearch/cli/ExitCodes.java b/libs/cli/src/main/java/org/opensearch/cli/ExitCodes.java index c705177b0d7b6..90efc89a08caf 100644 --- a/libs/cli/src/main/java/org/opensearch/cli/ExitCodes.java +++ b/libs/cli/src/main/java/org/opensearch/cli/ExitCodes.java @@ -36,20 +36,34 @@ * POSIX exit codes. */ public class ExitCodes { + /** No error */ public static final int OK = 0; - public static final int USAGE = 64; /* command line usage error */ - public static final int DATA_ERROR = 65; /* data format error */ - public static final int NO_INPUT = 66; /* cannot open input */ - public static final int NO_USER = 67; /* addressee unknown */ - public static final int NO_HOST = 68; /* host name unknown */ - public static final int UNAVAILABLE = 69; /* service unavailable */ - public static final int CODE_ERROR = 70; /* internal software error */ - public static final int CANT_CREATE = 73; /* can't create (user) output file */ - public static final int IO_ERROR = 74; /* input/output error */ - public static final int TEMP_FAILURE = 75; /* temp failure; user is invited to retry */ - public static final int PROTOCOL = 76; /* remote error in protocol */ - public static final int NOPERM = 77; /* permission denied */ - public static final int CONFIG = 78; /* configuration error */ + /** command line usage error */ + public static final int USAGE = 64; + /** data format error */ + public static final int DATA_ERROR = 65; + /** cannot open input */ + public static final int NO_INPUT = 66; + /** addressee unknown */ + public static final int NO_USER = 67; + /** host name unknown */ + public static final int NO_HOST = 68; + /** service unavailable */ + public static final int UNAVAILABLE = 69; + /** internal software error */ + public static final int CODE_ERROR = 70; + /** can't create (user) output file */ + public static final int CANT_CREATE = 73; + /** input/output error */ + public static final int IO_ERROR = 74; + /** temp failure; user is invited to retry */ + public static final int TEMP_FAILURE = 75; + /** remote error in protocol */ + public static final int PROTOCOL = 76; + /** permission denied */ + public static final int NOPERM = 77; + /** configuration error */ + public static final int CONFIG = 78; private ExitCodes() { /* no instance, just constants */ } } diff --git a/libs/cli/src/main/java/org/opensearch/cli/Terminal.java b/libs/cli/src/main/java/org/opensearch/cli/Terminal.java index 657b95fa052ab..fb1097178e5a3 100644 --- a/libs/cli/src/main/java/org/opensearch/cli/Terminal.java +++ b/libs/cli/src/main/java/org/opensearch/cli/Terminal.java @@ -44,13 +44,15 @@ /** * A Terminal wraps access to reading input and writing output for a cli. - * + *
<p>
    * The available methods are similar to those of {@link Console}, with the ability * to read either normal text or a password, and the ability to print a line * of text. Printing is also gated by the {@link Verbosity} of the terminal, * which allows {@link #println(Verbosity,String)} calls which act like a logger, * only actually printing if the verbosity level of the terminal is above * the verbosity of the message. + * @see ConsoleTerminal + * @see SystemTerminal */ public abstract class Terminal { @@ -65,35 +67,57 @@ private static PrintWriter newErrorWriter() { return new PrintWriter(System.err); } - /** Defines the available verbosity levels of messages to be printed. */ + /** Defines the available verbosity levels of messages to be printed.*/ public enum Verbosity { - SILENT, /* always printed */ - NORMAL, /* printed when no options are given to cli */ - VERBOSE /* printed only when cli is passed verbose option */ + /** always printed */ + SILENT, + /** printed when no options are given to cli */ + NORMAL, + /** printed only when cli is passed verbose option */ + VERBOSE } /** The current verbosity for the terminal, defaulting to {@link Verbosity#NORMAL}. */ private Verbosity verbosity = Verbosity.NORMAL; - /** The newline used when calling println. */ + /** The newline separator used when calling println. */ private final String lineSeparator; + /** Constructs a new terminal with the given line separator. + * @param lineSeparator the line separator to use when calling println + * */ protected Terminal(String lineSeparator) { this.lineSeparator = lineSeparator; } - /** Sets the verbosity of the terminal. */ + /** Sets the {@link Terminal#verbosity} of the terminal. (Default is {@link Verbosity#NORMAL}) + * @param verbosity the {@link Verbosity} level that will be used for printing + * */ public void setVerbosity(Verbosity verbosity) { this.verbosity = verbosity; } - /** Reads clear text from the terminal input. See {@link Console#readLine()}. */ + /** Reads clear text from the terminal input. + * @see Console#readLine() + * @param prompt message to display to the user + * @return the text entered by the user + * */ public abstract String readText(String prompt); - /** Reads password text from the terminal input. See {@link Console#readPassword()}}. */ + /** Reads secret text from the terminal input with echoing disabled. + * @see Console#readPassword() + * @param prompt message to display to the user + * @return the secret as a character array + * */ public abstract char[] readSecret(String prompt); - /** Read password text form terminal input up to a maximum length. */ + /** Read secret text from terminal input with echoing disabled, up to a maximum length. + * @see Console#readPassword() + * @param prompt message to display to the user + * @param maxLength the maximum length of the secret + * @return the secret as a character array + * @throws IllegalStateException if the secret exceeds the maximum length + * */ public char[] readSecret(String prompt, int maxLength) { char[] result = readSecret(prompt); if (result.length > maxLength) { @@ -103,30 +127,48 @@ public char[] readSecret(String prompt, int maxLength) { return result; } - /** Returns a Writer which can be used to write to the terminal directly using standard output. */ + /** Returns a Writer which can be used to write to the terminal directly using standard output. 
+ * @return a writer to {@link Terminal#DEFAULT} output + * @see Terminal.ConsoleTerminal + * @see Terminal.SystemTerminal + * */ public abstract PrintWriter getWriter(); - /** Returns a Writer which can be used to write to the terminal directly using standard error. */ + /** Returns a Writer which can be used to write to the terminal directly using standard error. + * @return a writer to stderr + * */ public PrintWriter getErrorWriter() { return ERROR_WRITER; } - /** Prints a line to the terminal at {@link Verbosity#NORMAL} verbosity level. */ + /** Prints a line to the terminal at {@link Verbosity#NORMAL} verbosity level, with a {@link Terminal#lineSeparator} + * @param msg the message to print + * */ public final void println(String msg) { println(Verbosity.NORMAL, msg); } - /** Prints a line to the terminal at {@code verbosity} level. */ + /** Prints message to the terminal's standard output at {@link Verbosity} level, with a {@link Terminal#lineSeparator}. + * @param verbosity the {@link Verbosity} level at which to print + * @param msg the message to print + * */ public final void println(Verbosity verbosity, String msg) { print(verbosity, msg + lineSeparator); } - /** Prints message to the terminal's standard output at {@code verbosity} level, without a newline. */ + /** Prints message to the terminal's standard output at {@link Verbosity} level, without adding a {@link Terminal#lineSeparator}. + * @param verbosity the {@link Verbosity} level at which to print + * @param msg the message to print + * */ public final void print(Verbosity verbosity, String msg) { print(verbosity, msg, false); } - /** Prints message to the terminal at {@code verbosity} level, without a newline. */ + /** Prints message to either standard or error output at {@link Verbosity} level, without adding a {@link Terminal#lineSeparator}. + * @param verbosity the {@link Verbosity} level at which to print. + * @param msg the message to print + * @param isError if true, prints to standard error instead of standard output + * */ private void print(Verbosity verbosity, String msg, boolean isError) { if (isPrintable(verbosity)) { PrintWriter writer = isError ? getErrorWriter() : getWriter(); @@ -135,29 +177,44 @@ private void print(Verbosity verbosity, String msg, boolean isError) { } } - /** Prints a line to the terminal's standard error at {@link Verbosity#NORMAL} verbosity level, without a newline. */ + /** Prints a line to the terminal's standard error at {@link Verbosity} level, without adding a {@link Terminal#lineSeparator}. + * @param verbosity the {@link Verbosity} level at which to print. + * @param msg the message to print + * */ public final void errorPrint(Verbosity verbosity, String msg) { print(verbosity, msg, true); } - /** Prints a line to the terminal's standard error at {@link Verbosity#NORMAL} verbosity level. */ + /** Prints a line to the terminal's standard error at {@link Verbosity#NORMAL} verbosity level, with a {@link Terminal#lineSeparator} + * @param msg the message to print + * */ public final void errorPrintln(String msg) { errorPrintln(Verbosity.NORMAL, msg); } - /** Prints a line to the terminal's standard error at {@code verbosity} level. */ + /** Prints a line to the terminal's standard error at {@link Verbosity} level, with a {@link Terminal#lineSeparator}. + * @param verbosity the {@link Verbosity} level at which to print. 
+ * @param msg the message to print + * */ public final void errorPrintln(Verbosity verbosity, String msg) { errorPrint(verbosity, msg + lineSeparator); } - /** Checks if is enough {@code verbosity} level to be printed */ + /** Checks if given {@link Verbosity} level is high enough to be printed at the level defined by {@link Terminal#verbosity} + * @param verbosity the {@link Verbosity} level to check + * @return true if the {@link Verbosity} level is high enough to be printed + * @see Terminal#setVerbosity(Verbosity) + * */ public final boolean isPrintable(Verbosity verbosity) { return this.verbosity.ordinal() >= verbosity.ordinal(); } /** - * Prompt for a yes or no answer from the user. This method will loop until 'y' or 'n' + * Prompt for a yes or no answer from the user. This method will loop until 'y', 'n' * (or the default empty value) is entered. + * @param prompt the prompt to display to the user + * @param defaultYes if true, the default answer is 'y', otherwise it is 'n' + * @return true if the user answered 'y', false if the user answered 'n' or the defaultYes value if the user entered nothing */ public final boolean promptYesNo(String prompt, boolean defaultYes) { String answerPrompt = defaultYes ? " [Y/n]" : " [y/N]"; @@ -181,6 +238,11 @@ public final boolean promptYesNo(String prompt, boolean defaultYes) { * character is immediately preceded by a carriage return, we have * a Windows-style newline, so we discard the carriage return as well * as the newline. + * @param reader the reader to read from + * @param maxLength the maximum length of the line to read + * @return the line read from the reader + * @throws RuntimeException if the line read exceeds the maximum length + * @throws RuntimeException if an IOException occurs while reading */ public static char[] readLineToCharArray(Reader reader, int maxLength) { char[] buf = new char[maxLength + 2]; @@ -215,6 +277,7 @@ public static char[] readLineToCharArray(Reader reader, int maxLength) { } } + /** Flushes the terminal's standard output and standard error. */ public void flush() { this.getWriter().flush(); this.getErrorWriter().flush(); diff --git a/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java b/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java index c4ba778e7db86..fc5e364241d12 100644 --- a/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java +++ b/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java @@ -104,7 +104,7 @@ public static void checkJarHell(Consumer output) throws IOException, URI /** * Parses the classpath into an array of URLs - * @return array of URLs + * @return collection of URLs * @throws IllegalStateException if the classpath contains empty elements */ public static Set parseClassPath() { @@ -114,7 +114,7 @@ public static Set parseClassPath() { /** * Parses the classpath into a set of URLs. For testing. 
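Since the parseClassPath contract above is easy to misread, here is a simplified stand-alone sketch of the documented behavior (split on the platform separator, reject empty elements, resolve relative entries against the working directory). ClasspathSplitDemo is an illustrative name; this is not the actual JarHell code.

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.regex.Pattern;

public class ClasspathSplitDemo {
    public static void main(String[] args) {
        String classPath = System.getProperty("java.class.path");
        String separator = System.getProperty("path.separator");
        for (String element : classPath.split(Pattern.quote(separator))) {
            if (element.isEmpty()) {
                // JarHell documents this case as an IllegalStateException.
                throw new IllegalStateException("empty classpath element in: " + classPath);
            }
            // Relative entries resolve against the current working directory,
            // matching the @SuppressForbidden note in the original.
            Path resolved = Paths.get(element).toAbsolutePath();
            System.out.println(resolved);
        }
    }
}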
* @param classPath classpath to parse (typically the system property {@code java.class.path}) - * @return array of URLs + * @return collection of URLs * @throws IllegalStateException if the classpath contains empty elements */ @SuppressForbidden(reason = "resolves against CWD because that is how classpaths work") diff --git a/libs/common/src/main/java/org/opensearch/common/CheckedConsumer.java b/libs/common/src/main/java/org/opensearch/common/CheckedConsumer.java index dede06d0e207d..07b4973c3a340 100644 --- a/libs/common/src/main/java/org/opensearch/common/CheckedConsumer.java +++ b/libs/common/src/main/java/org/opensearch/common/CheckedConsumer.java @@ -32,6 +32,8 @@ package org.opensearch.common; +import org.opensearch.common.annotation.PublicApi; + import java.util.function.Consumer; /** @@ -39,6 +41,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") @FunctionalInterface public interface CheckedConsumer { void accept(T t) throws E; diff --git a/libs/common/src/main/java/org/opensearch/common/Explicit.java b/libs/common/src/main/java/org/opensearch/common/Explicit.java index 66e079c461e75..e060baf6f187e 100644 --- a/libs/common/src/main/java/org/opensearch/common/Explicit.java +++ b/libs/common/src/main/java/org/opensearch/common/Explicit.java @@ -38,7 +38,7 @@ * Holds a value that is either: * a) set implicitly e.g. through some default value * b) set explicitly e.g. from a user selection - * + *
<p>
When merging conflicting configuration settings such as * field mapping settings it is preferable to preserve an explicit * choice rather than a choice made only implicitly by defaults. diff --git a/libs/common/src/main/java/org/opensearch/common/LocalTimeOffset.java b/libs/common/src/main/java/org/opensearch/common/LocalTimeOffset.java index 7e89641927ed5..eb7b331c9aa24 100644 --- a/libs/common/src/main/java/org/opensearch/common/LocalTimeOffset.java +++ b/libs/common/src/main/java/org/opensearch/common/LocalTimeOffset.java @@ -514,7 +514,7 @@ public boolean anyMoveBackToPreviousDay() { * Builds an array that can be {@link Arrays#binarySearch(long[], long)}ed * for the daylight savings time transitions. * - * @openearch.internal + * @opensearch.internal */ private static class TransitionArrayLookup extends AbstractManyTransitionsLookup { private final LocalTimeOffset[] offsets; diff --git a/libs/common/src/main/java/org/opensearch/common/Numbers.java b/libs/common/src/main/java/org/opensearch/common/Numbers.java index 084e52a41f8b1..d5a364a4a934e 100644 --- a/libs/common/src/main/java/org/opensearch/common/Numbers.java +++ b/libs/common/src/main/java/org/opensearch/common/Numbers.java @@ -260,4 +260,12 @@ public static double unsignedLongToDouble(long value) { // want to replace that with 1 in the shifted value for correct rounding. return (double) ((value >>> 1) | (value & 1)) * 2.0; } + + /** + * Return the strictly greater next power of two for the given value. + * For zero and negative numbers, it returns 1. + */ + public static long nextPowerOfTwo(long value) { + return 1L << (Long.SIZE - Long.numberOfLeadingZeros(value)); + } } diff --git a/libs/common/src/main/java/org/opensearch/common/SetOnce.java b/libs/common/src/main/java/org/opensearch/common/SetOnce.java index a596b5fcdb61d..778926ce108b7 100644 --- a/libs/common/src/main/java/org/opensearch/common/SetOnce.java +++ b/libs/common/src/main/java/org/opensearch/common/SetOnce.java @@ -35,7 +35,7 @@ * A convenient class which offers a semi-immutable object wrapper implementation which allows one * to set the value of an object exactly once, and retrieve it many times. If {@link #set(Object)} * is called more than once, {@link AlreadySetException} is thrown and the operation will fail. - * + *
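A worked example for the Numbers#nextPowerOfTwo method added above; the demo class name is an assumption, but the printed values follow directly from Long.numberOfLeadingZeros and the fact that long shift distances wrap modulo 64.

import org.opensearch.common.Numbers;

public class NextPowerOfTwoDemo {
    public static void main(String[] args) {
        // numberOfLeadingZeros(37) == 58, so 1L << (64 - 58) == 64: the strictly greater power of two.
        System.out.println(Numbers.nextPowerOfTwo(37L));  // 64
        // Exact powers of two still move up one step: 64 -> 128.
        System.out.println(Numbers.nextPowerOfTwo(64L));  // 128
        // numberOfLeadingZeros is 64 for zero and 0 for negatives; both shift
        // amounts reduce to 0 mod 64, so the result is 1, as documented.
        System.out.println(Numbers.nextPowerOfTwo(0L));   // 1
        System.out.println(Numbers.nextPowerOfTwo(-5L));  // 1
    }
}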
<p>
    * This is borrowed from lucene's experimental API. It is not reused to eliminate the dependency * on lucene core for such a simple (standalone) utility class that may change beyond OpenSearch needs. * diff --git a/libs/common/src/main/java/org/opensearch/common/action/ActionFuture.java b/libs/common/src/main/java/org/opensearch/common/action/ActionFuture.java index bff22b7d3cb18..827b7dfd51705 100644 --- a/libs/common/src/main/java/org/opensearch/common/action/ActionFuture.java +++ b/libs/common/src/main/java/org/opensearch/common/action/ActionFuture.java @@ -32,6 +32,7 @@ package org.opensearch.common.action; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import java.util.concurrent.Future; @@ -42,6 +43,7 @@ * * @opensearch.internal */ +@PublicApi(since = "1.0.0") public interface ActionFuture extends Future { /** diff --git a/libs/common/src/main/java/org/opensearch/common/collect/Iterators.java b/libs/common/src/main/java/org/opensearch/common/collect/Iterators.java index c7e7ae6a44a21..9b64932356c10 100644 --- a/libs/common/src/main/java/org/opensearch/common/collect/Iterators.java +++ b/libs/common/src/main/java/org/opensearch/common/collect/Iterators.java @@ -41,6 +41,15 @@ * @opensearch.internal */ public class Iterators { + + /** + * Concat iterators + * + * @param iterators the iterators to concat + * @param the type of iterator + * @return a new {@link ConcatenatedIterator} + * @throws NullPointerException if iterators is null + */ public static Iterator concat(Iterator... iterators) { if (iterators == null) { throw new NullPointerException("iterators"); @@ -71,6 +80,11 @@ static class ConcatenatedIterator implements Iterator { this.iterators = iterators; } + /** + * Returns {@code true} if the iteration has more elements. (In other words, returns {@code true} if {@link #next} would return an + * element rather than throwing an exception.) + * @return {@code true} if the iteration has more elements + */ @Override public boolean hasNext() { boolean hasNext = false; @@ -81,6 +95,11 @@ public boolean hasNext() { return hasNext; } + /** + * Returns the next element in the iteration. + * @return the next element in the iteration + * @throws NoSuchElementException if the iteration has no more elements + */ @Override public T next() { if (!hasNext()) { diff --git a/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java b/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java index 36bc5504061f5..5c0e3f2de7708 100644 --- a/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java +++ b/libs/common/src/main/java/org/opensearch/common/collect/Tuple.java @@ -34,7 +34,6 @@ /** * Java 9 Tuple - * * todo: deprecate and remove w/ min jdk upgrade to 11? * * @opensearch.internal @@ -61,6 +60,20 @@ public V2 v2() { return v2; } + /** + * Returns {@code true} if the given object is also a tuple and the two tuples + * have equal {@link #v1()} and {@link #v2()} values. + *
<p>
    + * Returns {@code false} otherwise, including for {@code null} values or + * objects of different types. + *
<p>
    + * Note: {@code Tuple} instances are equal if the underlying values are + * equal, even if the types are different. + * + * @param o the object to compare to + * @return {@code true} if the given object is also a tuple and the two tuples + * have equal {@link #v1()} and {@link #v2()} values. + */ @Override public boolean equals(Object o) { if (this == o) return true; @@ -74,6 +87,10 @@ public boolean equals(Object o) { return true; } + /** + * Returns the hash code value for this Tuple. + * @return the hash code value for this Tuple. + */ @Override public int hashCode() { int result = v1 != null ? v1.hashCode() : 0; @@ -81,6 +98,10 @@ public int hashCode() { return result; } + /** + * Returns a string representation of a Tuple + * @return {@code "Tuple [v1=value1, v2=value2]"} + */ @Override public String toString() { return "Tuple [v1=" + v1 + ", v2=" + v2 + "]"; diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/CryptoHandler.java b/libs/common/src/main/java/org/opensearch/common/crypto/CryptoHandler.java new file mode 100644 index 0000000000000..9572b5b9054b2 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/crypto/CryptoHandler.java @@ -0,0 +1,118 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.crypto; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.io.InputStreamContainer; + +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; + +/** + * Crypto provider abstractions for encryption and decryption of data. Allows registering multiple providers + * for defining different ways of encrypting or decrypting data. + * + * @param Encryption Metadata / CryptoContext + * @param Parsed Encryption Metadata / CryptoContext + */ +@ExperimentalApi +public interface CryptoHandler extends Closeable { + + /** + * To initialise or create a new crypto metadata to be used in encryption. This is needed to set the context before + * beginning encryption. + * + * @return crypto metadata instance + */ + T initEncryptionMetadata(); + + /** + * To load crypto metadata to be used in encryption from content header. + * Note that underlying information in the loaded metadata object is same as present in the object created during + * encryption but object type may differ. + * + * @param encryptedHeaderContentSupplier supplier for encrypted header content. + * @return crypto metadata instance used in decryption. + */ + U loadEncryptionMetadata(EncryptedHeaderContentSupplier encryptedHeaderContentSupplier) throws IOException; + + /** + * Few encryption algorithms have certain conditions on the unit of content to be encrypted. This requires the + * content size to be re adjusted in order to fulfil these conditions for partial writes. If write requests for + * encryption of a part of content do not fulfil these conditions then encryption fails or can result in corrupted + * content depending on the algorithm used. This method exposes a means to re-adjust sizes of such writes. + * + * @param cryptoContext crypto metadata instance + * @param contentSize Size of the raw content + * @return Adjusted size of the content. + */ + long adjustContentSizeForPartialEncryption(T cryptoContext, long contentSize); + + /** + * Estimate length of the encrypted content. 
It should only be used to determine length of entire content after + * encryption. + * + * @param cryptoContext crypto metadata instance consisting of encryption metadata used in encryption. + * @param contentLength Size of the raw content + * @return Calculated size of the encrypted content. + */ + long estimateEncryptedLengthOfEntireContent(T cryptoContext, long contentLength); + + /** + * For given encrypted content length, estimate the length of the decrypted content. + * @param cryptoContext crypto metadata instance consisting of encryption metadata used in encryption. + * @param contentLength Size of the encrypted content + * @return Calculated size of the decrypted content. + */ + long estimateDecryptedLength(U cryptoContext, long contentLength); + + /** + * Wraps a raw InputStream with encrypting stream + * + * @param encryptionMetadata created earlier to set the crypto metadata. + * @param stream Raw InputStream to encrypt + * @return encrypting stream wrapped around raw InputStream. + */ + InputStreamContainer createEncryptingStream(T encryptionMetadata, InputStreamContainer stream); + + /** + * Provides encrypted stream for a raw stream emitted for a part of content. + * + * @param cryptoContext crypto metadata instance. + * @param stream raw stream for which encrypted stream has to be created. + * @param totalStreams Number of streams being used for the entire content. + * @param streamIdx Index of the current stream. + * @return Encrypted stream for the provided raw stream. + */ + InputStreamContainer createEncryptingStreamOfPart(T cryptoContext, InputStreamContainer stream, int totalStreams, int streamIdx); + + /** + * This method accepts an encrypted stream and provides a decrypting wrapper. + * @param encryptingStream to be decrypted. + * @return Decrypting wrapper stream + */ + InputStream createDecryptingStream(InputStream encryptingStream); + + /** + * This method creates a {@link DecryptedRangedStreamProvider} which provides a wrapped stream to decrypt the + * underlying stream. This also provides adjusted range against the actual range which should be used for fetching + * and supplying the encrypted content for decryption. Extra content outside the range is trimmed down and returned + * by the decrypted stream. + * For partial reads of encrypted content, few algorithms require the range of content to be adjusted for + * successful decryption. Adjusted range may or may not be same as the provided range. If range is adjusted then + * starting offset of resultant range can be lesser than the starting offset of provided range and end + * offset can be greater than the ending offset of the provided range. + * + * @param cryptoContext crypto metadata instance. + * @param startPosOfRawContent starting position in the raw/decrypted content + * @param endPosOfRawContent ending position in the raw/decrypted content + */ + DecryptedRangedStreamProvider createDecryptingStreamOfRange(U cryptoContext, long startPosOfRawContent, long endPosOfRawContent); +} diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/DataKeyPair.java b/libs/common/src/main/java/org/opensearch/common/crypto/DataKeyPair.java new file mode 100644 index 0000000000000..711c0d314ecef --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/crypto/DataKeyPair.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.common.crypto; + +/** + * Key pair generated by {@link MasterKeyProvider} + */ +public class DataKeyPair { + + /** Unencrypted data key used for encryption and decryption */ + private final byte[] rawKey; + /** Encrypted version of rawKey */ + private final byte[] encryptedKey; + + /** + * Constructor to initialize key-pair values + * @param rawKey Unencrypted data key used for encryption and decryption + * @param encryptedKey Encrypted version of rawKey + */ + public DataKeyPair(byte[] rawKey, byte[] encryptedKey) { + this.rawKey = rawKey; + this.encryptedKey = encryptedKey; + } + + /** + * Returns Unencrypted data key + * @return raw/decrypted key + */ + public byte[] getRawKey() { + return rawKey; + } + + /** + * Returns encrypted key + * @return encrypted key + */ + public byte[] getEncryptedKey() { + return encryptedKey; + } +} diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/DecryptedRangedStreamProvider.java b/libs/common/src/main/java/org/opensearch/common/crypto/DecryptedRangedStreamProvider.java new file mode 100644 index 0000000000000..2cda3c1f8bdb4 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/crypto/DecryptedRangedStreamProvider.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.crypto; + +import java.io.InputStream; +import java.util.function.UnaryOperator; + +/** + * Contains adjusted range of partial encrypted content which needs to be used for decryption. + */ +public class DecryptedRangedStreamProvider { + + /** Adjusted range of partial encrypted content which needs to be used for decryption. */ + private final long[] adjustedRange; + /** Stream provider for decryption and range re-adjustment. */ + private final UnaryOperator decryptedStreamProvider; + + /** + * To construct adjusted encrypted range. + * @param adjustedRange range of partial encrypted content which needs to be used for decryption. + * @param decryptedStreamProvider stream provider for decryption and range re-adjustment. + */ + public DecryptedRangedStreamProvider(long[] adjustedRange, UnaryOperator decryptedStreamProvider) { + this.adjustedRange = adjustedRange; + this.decryptedStreamProvider = decryptedStreamProvider; + } + + /** + * Adjusted range of partial encrypted content which needs to be used for decryption. + * @return adjusted range + */ + public long[] getAdjustedRange() { + return adjustedRange; + } + + /** + * A utility stream provider which supplies the stream responsible for decrypting the content and reading the + * desired range of decrypted content by skipping extra content which got decrypted as a result of range adjustment. + * @return stream provider for decryption and supplying the desired range of content. 
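A sketch of how a caller might drive the ranged-decryption flow described above: fetch the adjusted (possibly widened) encrypted range, then let the provider's stream trim back to exactly the requested bytes. fetchEncryptedRange stands in for a storage-specific fetch and is an assumption, not part of this API.

import java.io.InputStream;

import org.opensearch.common.crypto.DecryptedRangedStreamProvider;

class RangedReadSketch {
    InputStream readDecryptedRange(DecryptedRangedStreamProvider provider) {
        long[] adjusted = provider.getAdjustedRange(); // may be wider than the requested range
        InputStream encrypted = fetchEncryptedRange(adjusted[0], adjusted[1]); // assumed helper
        // The wrapper decrypts and skips the extra content introduced by the adjustment.
        return provider.getDecryptedStreamProvider().apply(encrypted);
    }

    InputStream fetchEncryptedRange(long start, long end) {
        throw new UnsupportedOperationException("storage-specific fetch, assumed");
    }
}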
+ */ + public UnaryOperator getDecryptedStreamProvider() { + return decryptedStreamProvider; + } + +} diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/EncryptedHeaderContentSupplier.java b/libs/common/src/main/java/org/opensearch/common/crypto/EncryptedHeaderContentSupplier.java new file mode 100644 index 0000000000000..49a037f05f185 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/crypto/EncryptedHeaderContentSupplier.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.common.crypto; + +import java.io.IOException; + +/** + * This is used in partial decryption. Header information is required for decryption of actual encrypted content. + * Implementation of this supplier only requires first few bytes of encrypted content to be supplied. + */ +public interface EncryptedHeaderContentSupplier { + + /** + * @param start Start position of the encrypted content (Generally supplied as 0 during usage) + * @param end End position of the header. + * @return Encrypted header content (May contain additional content which is later discarded) + * @throws IOException In case content fetch fails. + */ + byte[] supply(long start, long end) throws IOException; +} diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java b/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java new file mode 100644 index 0000000000000..8afa48eb92c0f --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/crypto/MasterKeyProvider.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.common.crypto; + +import java.io.Closeable; +import java.util.Map; + +/** + * Master key provider responsible for management of master keys. + */ +public interface MasterKeyProvider extends Closeable { + + /** + * Returns data key pair + * @return data key pair generated by master key. + */ + DataKeyPair generateDataPair(); + + /** + * Returns decrypted key against the encrypted key. + * @param encryptedKey Key to decrypt + * @return Decrypted version of key. + */ + byte[] decryptKey(byte[] encryptedKey); + + /** + * Returns key id. + * @return key id + */ + String getKeyId(); + + /** + * Returns encryption context associated with this master key. + * @return encryption context associated with this master key. + */ + Map getEncryptionContext(); +} diff --git a/libs/common/src/main/java/org/opensearch/common/crypto/package-info.java b/libs/common/src/main/java/org/opensearch/common/crypto/package-info.java new file mode 100644 index 0000000000000..c744689ebf532 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/crypto/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Common crypto utilities used across opensearch. 
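A sketch of the envelope pattern that MasterKeyProvider and DataKeyPair above are shaped for: the raw key encrypts data locally and is then discarded, while only the encrypted key is persisted alongside the ciphertext. The symmetric-cipher helpers are assumptions left abstract here.

import org.opensearch.common.crypto.DataKeyPair;
import org.opensearch.common.crypto.MasterKeyProvider;

class EnvelopeSketch {
    // Persist ciphertext plus the encrypted key; never the raw key.
    byte[][] encrypt(MasterKeyProvider provider, byte[] plaintext) {
        DataKeyPair pair = provider.generateDataPair();
        byte[] ciphertext = encryptWithRawKey(pair.getRawKey(), plaintext); // assumed helper
        return new byte[][] { ciphertext, pair.getEncryptedKey() };
    }

    byte[] decrypt(MasterKeyProvider provider, byte[] ciphertext, byte[] encryptedKey) {
        byte[] rawKey = provider.decryptKey(encryptedKey); // round-trips via the master key
        return decryptWithRawKey(rawKey, ciphertext); // assumed helper
    }

    byte[] encryptWithRawKey(byte[] key, byte[] data) { throw new UnsupportedOperationException(); }

    byte[] decryptWithRawKey(byte[] key, byte[] data) { throw new UnsupportedOperationException(); }
}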
*/ +package org.opensearch.common.crypto; diff --git a/libs/common/src/main/java/org/opensearch/common/hash/T1ha1.java b/libs/common/src/main/java/org/opensearch/common/hash/T1ha1.java new file mode 100644 index 0000000000000..07b2306eda4e5 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/hash/T1ha1.java @@ -0,0 +1,277 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.hash; + +import org.opensearch.common.annotation.InternalApi; + +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.invoke.VarHandle; +import java.nio.ByteOrder; + +import static java.lang.Long.rotateRight; + +/** + * t1ha: Fast Positive Hash + * + *
<p>
    + * Implements t1ha1; + * a fast portable hash function with reasonable quality for checksums, hash tables, and thin fingerprinting. + * + *
<p>
    + * To overcome language and performance limitations, this implementation differs slightly from the + * reference implementation in C++, + * so the returned values may vary before JDK 18. + * + *
<p>
    + * Intended for little-endian systems but returns the same result on big-endian, albeit marginally slower. + * + * @opensearch.internal + */ +@InternalApi +public final class T1ha1 { + private static final long SEED = System.nanoTime(); + private static final Mux64 MUX_64_IMPL = fastestMux64Impl(); + + private static final VarHandle LONG_HANDLE = MethodHandles.byteArrayViewVarHandle(long[].class, ByteOrder.LITTLE_ENDIAN); + private static final VarHandle INT_HANDLE = MethodHandles.byteArrayViewVarHandle(int[].class, ByteOrder.LITTLE_ENDIAN); + private static final VarHandle SHORT_HANDLE = MethodHandles.byteArrayViewVarHandle(short[].class, ByteOrder.LITTLE_ENDIAN); + + // "Magic" primes: + private static final long p0 = 0xEC99BF0D8372CAABL; + private static final long p1 = 0x82434FE90EDCEF39L; + private static final long p2 = 0xD4F06DB99D67BE4BL; + private static final long p3 = 0xBD9CACC22C6E9571L; + private static final long p4 = 0x9C06FAF4D023E3ABL; + private static final long p5 = 0xC060724A8424F345L; + private static final long p6 = 0xCB5AF53AE3AAAC31L; + + // Rotations: + private static final int s0 = 41; + private static final int s1 = 17; + private static final int s2 = 31; + + /** + * No public constructor. + */ + private T1ha1() {} + + /** + * Returns the hash code for the specified range of the given {@code byte} array. + * @param input the input byte array + * @param offset the starting offset + * @param length the length of the range + * @return hash code + */ + public static long hash(byte[] input, int offset, int length) { + return hash(input, offset, length, SEED); + } + + /** + * Returns the hash code for the specified range of the given {@code byte} array. + * @param input the input byte array + * @param offset the starting offset + * @param length the length of the range + * @param seed customized seed + * @return hash code + */ + public static long hash(byte[] input, int offset, int length, long seed) { + long a = seed; + long b = length; + + if (length > 32) { + long c = rotateRight(length, s1) + seed; + long d = length ^ rotateRight(seed, s1); + + do { + long w0 = fetch64(input, offset); + long w1 = fetch64(input, offset + 8); + long w2 = fetch64(input, offset + 16); + long w3 = fetch64(input, offset + 24); + + long d02 = w0 ^ rotateRight(w2 + d, s1); + long c13 = w1 ^ rotateRight(w3 + c, s1); + c += a ^ rotateRight(w0, s0); + d -= b ^ rotateRight(w1, s2); + a ^= p1 * (d02 + w3); + b ^= p0 * (c13 + w2); + + offset += 32; + length -= 32; + } while (length >= 32); + + a ^= p6 * (rotateRight(c, s1) + d); + b ^= p5 * (rotateRight(d, s1) + c); + } + + return h32(input, offset, length, a, b); + } + + /** + * Computes the hash of up to 32 bytes. + * Constants in the switch expression are dense; JVM will use them as indices into a table of + * instruction pointers (tableswitch instruction), making lookups really fast. 
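A usage sketch for the T1ha1 API introduced in this file. Note that the no-seed overload uses a process-wide seed derived from System.nanoTime(), so only the explicitly seeded call is stable across JVM runs (and, per the class javadoc, values may also vary before JDK 18).

import java.nio.charset.StandardCharsets;

import org.opensearch.common.hash.T1ha1;

public class T1ha1Demo {
    public static void main(String[] args) {
        byte[] data = "hello world".getBytes(StandardCharsets.UTF_8);
        long perRun = T1ha1.hash(data, 0, data.length);      // process-wide random seed
        long seeded = T1ha1.hash(data, 0, data.length, 42L); // reproducible for a fixed JDK
        System.out.println(perRun + " " + seeded);
    }
}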
+ */ + @SuppressWarnings("fallthrough") + private static long h32(byte[] input, int offset, int length, long a, long b) { + switch (length) { + default: + b += mux64(fetch64(input, offset), p4); + offset += 8; + length -= 8; + case 24: + case 23: + case 22: + case 21: + case 20: + case 19: + case 18: + case 17: + a += mux64(fetch64(input, offset), p3); + offset += 8; + length -= 8; + case 16: + case 15: + case 14: + case 13: + case 12: + case 11: + case 10: + case 9: + b += mux64(fetch64(input, offset), p2); + offset += 8; + length -= 8; + case 8: + case 7: + case 6: + case 5: + case 4: + case 3: + case 2: + case 1: + a += mux64(tail64(input, offset, length), p1); + case 0: + // Final weak avalanche + return mux64(rotateRight(a + b, s1), p4) + mix64(a ^ b, p0); + } + } + + /** + * XOR the high and low parts of the full 128-bit product. + */ + private static long mux64(long a, long b) { + return MUX_64_IMPL.mux64(a, b); + } + + /** + * XOR-MUL-XOR bit-mixer. + */ + private static long mix64(long a, long b) { + a *= b; + return a ^ rotateRight(a, s0); + } + + /** + * Reads "length" bytes starting at "offset" in little-endian order; returned as long. + * It is assumed that the length is between 1 and 8 (inclusive); but no defensive checks are made as such. + */ + private static long tail64(byte[] input, int offset, int length) { + switch (length) { + case 1: + return fetch8(input, offset); + case 2: + return fetch16(input, offset); + case 3: + return fetch16(input, offset) | (fetch8(input, offset + 2) << 16); + case 4: + return fetch32(input, offset); + case 5: + return fetch32(input, offset) | (fetch8(input, offset + 4) << 32); + case 6: + return fetch32(input, offset) | (fetch16(input, offset + 4) << 32); + case 7: + // This is equivalent to: + // return fetch32(input, offset) | (fetch16(input, offset + 4) << 32) | (fetch8(input, offset + 6) << 48); + // But reading two ints overlapping by one byte is faster due to lesser instructions. + return fetch32(input, offset) | (fetch32(input, offset + 3) << 24); + default: + return fetch64(input, offset); + } + } + + /** + * Reads a 64-bit long. + */ + private static long fetch64(byte[] input, int offset) { + return (long) LONG_HANDLE.get(input, offset); + } + + /** + * Reads a 32-bit unsigned integer, returned as long. + */ + private static long fetch32(byte[] input, int offset) { + return (int) INT_HANDLE.get(input, offset) & 0xFFFFFFFFL; + } + + /** + * Reads a 16-bit unsigned short, returned as long. + */ + private static long fetch16(byte[] input, int offset) { + return (short) SHORT_HANDLE.get(input, offset) & 0xFFFFL; + } + + /** + * Reads an 8-bit unsigned byte, returned as long. + */ + private static long fetch8(byte[] input, int offset) { + return input[offset] & 0xFFL; + } + + /** + * The implementation of mux64. + */ + @FunctionalInterface + private interface Mux64 { + long mux64(long a, long b); + } + + /** + * Provides the fastest available implementation of mux64 on this platform. + * + *
<p>
    + * Ideally, the following should be returned to match the reference implementation: + * {@code Math.unsignedMultiplyHigh(a, b) ^ (a * b)} + * + *
<p>
    + * Since unsignedMultiplyHigh isn't available before JDK 18, and calculating it without intrinsics is quite slow, + * the multiplyHigh method is used instead. Slight loss in quality is imperceptible for our use-case: a hash table. + * {@code Math.multiplyHigh(a, b) ^ (a * b)} + * + *
<p>
    + * This indirection can be removed once we stop supporting older JDKs. + */ + private static Mux64 fastestMux64Impl() { + try { + final MethodHandle unsignedMultiplyHigh = MethodHandles.publicLookup() + .findStatic(Math.class, "unsignedMultiplyHigh", MethodType.methodType(long.class, long.class, long.class)); + return (a, b) -> { + try { + return (long) unsignedMultiplyHigh.invokeExact(a, b) ^ (a * b); + } catch (Throwable e) { + throw new RuntimeException(e); + } + }; + } catch (NoSuchMethodException e) { + return (a, b) -> Math.multiplyHigh(a, b) ^ (a * b); + } catch (IllegalAccessException e) { + throw new RuntimeException(e); + } + } +} diff --git a/server/src/main/java/org/opensearch/common/rounding/package-info.java b/libs/common/src/main/java/org/opensearch/common/hash/package-info.java similarity index 72% rename from server/src/main/java/org/opensearch/common/rounding/package-info.java rename to libs/common/src/main/java/org/opensearch/common/hash/package-info.java index 5fa3e39c6a786..bd393b8b921ed 100644 --- a/server/src/main/java/org/opensearch/common/rounding/package-info.java +++ b/libs/common/src/main/java/org/opensearch/common/hash/package-info.java @@ -6,5 +6,7 @@ * compatible open source license. */ -/** Base DateTime rounding package. */ -package org.opensearch.common.rounding; +/** + * Common hashing utilities. + */ +package org.opensearch.common.hash; diff --git a/libs/common/src/main/java/org/opensearch/common/io/PathUtils.java b/libs/common/src/main/java/org/opensearch/common/io/PathUtils.java index b3526859933ec..ed8d50892b74a 100644 --- a/libs/common/src/main/java/org/opensearch/common/io/PathUtils.java +++ b/libs/common/src/main/java/org/opensearch/common/io/PathUtils.java @@ -93,7 +93,7 @@ public static Path get(URI uri) { /** * Tries to resolve the given path against the list of available roots. - * + *
<p>
If path starts with one of the listed roots, it is returned back by this method, otherwise null is returned. */ public static Path get(Path[] roots, String path) { @@ -109,7 +109,7 @@ public static Path get(Path[] roots, String path) { /** * Tries to resolve the given file uri against the list of available roots. - * +
<p>
If uri starts with one of the listed roots, it is returned back by this method, otherwise null is returned. */ public static Path get(Path[] roots, URI uri) { diff --git a/libs/common/src/main/java/org/opensearch/common/lifecycle/Lifecycle.java b/libs/common/src/main/java/org/opensearch/common/lifecycle/Lifecycle.java index e76d49cbf49e8..c1cf9b2998a13 100644 --- a/libs/common/src/main/java/org/opensearch/common/lifecycle/Lifecycle.java +++ b/libs/common/src/main/java/org/opensearch/common/lifecycle/Lifecycle.java @@ -32,6 +32,8 @@ package org.opensearch.common.lifecycle; +import org.opensearch.common.annotation.PublicApi; + /** * Lifecycle state. Allows the following transitions: *
<ul>
@@ -73,15 +75,17 @@ * } * </pre>
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Lifecycle { /** * State in the lifecycle * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum State { INITIALIZED, STOPPED, diff --git a/libs/common/src/main/java/org/opensearch/common/lifecycle/LifecycleComponent.java b/libs/common/src/main/java/org/opensearch/common/lifecycle/LifecycleComponent.java index f343f9ada01ef..781c276fefe13 100644 --- a/libs/common/src/main/java/org/opensearch/common/lifecycle/LifecycleComponent.java +++ b/libs/common/src/main/java/org/opensearch/common/lifecycle/LifecycleComponent.java @@ -32,13 +32,15 @@ package org.opensearch.common.lifecycle; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; /** * Base interface for a lifecycle component. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface LifecycleComponent extends Releasable { Lifecycle.State lifecycleState(); diff --git a/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java b/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java index a4fbc6cb65b0d..0f289c09bbae2 100644 --- a/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java +++ b/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java @@ -368,7 +368,7 @@ public static InetAddress forString(String ipString) { /** * Convert a byte array into an InetAddress. - * + *
<p>
* {@link InetAddress#getByAddress} is documented as throwing a checked * exception "if IP address is of illegal length." We replace it with * an unchecked exception, for use by callers who already know that addr @@ -423,7 +423,7 @@ public static Tuple parseCidr(String maskedAddress) { /** * Given an address and prefix length, returns the string representation of the range in CIDR notation. - * + *
<p>
* See {@link #toAddrString} for details on how the address is represented. */ public static String toCidrString(InetAddress address, int prefixLength) { diff --git a/libs/common/src/main/java/org/opensearch/common/unit/TimeValue.java b/libs/common/src/main/java/org/opensearch/common/unit/TimeValue.java index 670275397893c..30ed5bf63a748 100644 --- a/libs/common/src/main/java/org/opensearch/common/unit/TimeValue.java +++ b/libs/common/src/main/java/org/opensearch/common/unit/TimeValue.java @@ -32,6 +32,8 @@ package org.opensearch.common.unit; +import org.opensearch.common.annotation.PublicApi; + import java.util.Locale; import java.util.Objects; import java.util.concurrent.TimeUnit; @@ -41,6 +43,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class TimeValue implements Comparable { /** How many nano-seconds in one milli-second */ @@ -221,10 +224,10 @@ public double getDaysFrac() { /** * Returns a {@link String} representation of the current {@link TimeValue}. - * + *
<p>
* Note that this method might produce fractional time values (ex 1.6m) which cannot be * parsed by methods like {@link TimeValue#parse(String, String, String, String)}. - * +
<p>
* Also note that the maximum string value that will be generated is * {@code 106751.9d} due to the way that values are internally converted * to nanoseconds (106751.9 days is Long.MAX_VALUE nanoseconds) @@ -236,12 +239,12 @@ public String toString() { /** * Returns a {@link String} representation of the current {@link TimeValue}. - * + *
<p>
* Note that this method might produce fractional time values (ex 1.6m) which cannot be * parsed by methods like {@link TimeValue#parse(String, String, String, String)}. The number of * fractional decimals (up to 10 maximum) is truncated to the number of fraction pieces * specified. - * +
<p>
* Also note that the maximum string value that will be generated is * {@code 106751.9d} due to the way that values are internally converted * to nanoseconds (106751.9 days is Long.MAX_VALUE nanoseconds) diff --git a/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java b/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java index 8762217916c7a..d6ea4fa359df3 100644 --- a/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java +++ b/libs/common/src/main/java/org/opensearch/common/util/BitMixer.java @@ -25,9 +25,9 @@ /** * Bit mixing utilities from carrotsearch.hppc. - * + *
<p>
* Licensed under ALv2. This is pulled in directly to avoid a full hppc dependency. - * + *
<p>
* The purpose of these methods is to evenly distribute key space over int32 * range. */ @@ -111,7 +111,7 @@ public static int mix32(int k) { /** * Computes David Stafford variant 9 of 64bit mix function (MH3 finalization step, * with different shifts and constants). - * + *
<p>
* Variant 9 is picked because it contains two 32-bit shifts which could be possibly * optimized into better machine code. * diff --git a/libs/common/src/test/java/org/opensearch/common/NumbersTests.java b/libs/common/src/test/java/org/opensearch/common/NumbersTests.java index 5fb85d815ded2..7990ba74f162a 100644 --- a/libs/common/src/test/java/org/opensearch/common/NumbersTests.java +++ b/libs/common/src/test/java/org/opensearch/common/NumbersTests.java @@ -221,4 +221,25 @@ public void testToUnsignedBigInteger() { assertEquals(random, Numbers.toUnsignedBigInteger(random.longValue())); assertEquals(Numbers.MAX_UNSIGNED_LONG_VALUE, Numbers.toUnsignedBigInteger(Numbers.MAX_UNSIGNED_LONG_VALUE.longValue())); } + + public void testNextPowerOfTwo() { + // Negative values: + for (int i = 0; i < 1000; i++) { + long value = randomLongBetween(-500000, -1); + assertEquals(1, Numbers.nextPowerOfTwo(value)); + } + + // Zero value: + assertEquals(1, Numbers.nextPowerOfTwo(0L)); + + // Positive values: + for (int i = 0; i < 1000; i++) { + long value = randomLongBetween(1, 500000); + long nextPowerOfTwo = Numbers.nextPowerOfTwo(value); + + assertTrue(nextPowerOfTwo > value); // must be strictly greater + assertTrue((nextPowerOfTwo >>> 1) <= value); // must be greater by no more than one power of two + assertEquals(0, nextPowerOfTwo & (nextPowerOfTwo - 1)); // must be a power of two + } + } } diff --git a/libs/common/src/test/java/org/opensearch/common/hash/T1Ha1Tests.java b/libs/common/src/test/java/org/opensearch/common/hash/T1Ha1Tests.java new file mode 100644 index 0000000000000..e348fbf759bdd --- /dev/null +++ b/libs/common/src/test/java/org/opensearch/common/hash/T1Ha1Tests.java @@ -0,0 +1,312 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.hash; + +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.invoke.VarHandle; +import java.nio.ByteOrder; + +public class T1Ha1Tests extends HashFunctionTestCase { + private static final VarHandle LONG_HANDLE = MethodHandles.byteArrayViewVarHandle(long[].class, ByteOrder.LITTLE_ENDIAN); + private final byte[] scratch = new byte[8]; + + /** + * Inspired from the tests defined in the reference implementation: + * t1ha_selfcheck.c + */ + public void testSelfCheck() { + byte[] testPattern = { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + (byte) 0xFF, + 0x7F, + 0x3F, + 0x1F, + 0xF, + 8, + 16, + 32, + 64, + (byte) 0x80, + (byte) 0xFE, + (byte) 0xFC, + (byte) 0xF8, + (byte) 0xF0, + (byte) 0xE0, + (byte) 0xC0, + (byte) 0xFD, + (byte) 0xFB, + (byte) 0xF7, + (byte) 0xEF, + (byte) 0xDF, + (byte) 0xBF, + 0x55, + (byte) 0xAA, + 11, + 17, + 19, + 23, + 29, + 37, + 42, + 43, + 'a', + 'b', + 'c', + 'd', + 'e', + 'f', + 'g', + 'h', + 'i', + 'j', + 'k', + 'l', + 'm', + 'n', + 'o', + 'p', + 'q', + 'r', + 's', + 't', + 'u', + 'v', + 'w', + 'x' }; + + // Reference hashes when using {@link Math::unsignedMultiplyHigh} in the mux64 step. 
+ // These values match the ones defined in the reference implementation: + // https://github.com/erthink/t1ha/blob/master/src/t1ha1_selfcheck.c#L51-L72 + long[] referenceUnsignedMultiplyHigh = { + 0L, + 0x6A580668D6048674L, + 0xA2FE904AFF0D0879L, + 0xE3AB9C06FAF4D023L, + 0x6AF1C60874C95442L, + 0xB3557E561A6C5D82L, + 0x0AE73C696F3D37C0L, + 0x5EF25F7062324941L, + 0x9B784F3B4CE6AF33L, + 0x6993BB206A74F070L, + 0xF1E95DF109076C4CL, + 0x4E1EB70C58E48540L, + 0x5FDD7649D8EC44E4L, + 0x559122C706343421L, + 0x380133D58665E93DL, + 0x9CE74296C8C55AE4L, + 0x3556F9A5757AB6D0L, + 0xF62751F7F25C469EL, + 0x851EEC67F6516D94L, + 0xED463EE3848A8695L, + 0xDC8791FEFF8ED3ACL, + 0x2569C744E1A282CFL, + 0xF90EB7C1D70A80B9L, + 0x68DFA6A1B8050A4CL, + 0x94CCA5E8210D2134L, + 0xF5CC0BEABC259F52L, + 0x40DBC1F51618FDA7L, + 0x0807945BF0FB52C6L, + 0xE5EF7E09DE70848DL, + 0x63E1DF35FEBE994AL, + 0x2025E73769720D5AL, + 0xAD6120B2B8A152E1L, + 0x2A71D9F13959F2B7L, + 0x8A20849A27C32548L, + 0x0BCBC9FE3B57884EL, + 0x0E028D255667AEADL, + 0xBE66DAD3043AB694L, + 0xB00E4C1238F9E2D4L, + 0x5C54BDE5AE280E82L, + 0x0E22B86754BC3BC4L, + 0x016707EBF858B84DL, + 0x990015FBC9E095EEL, + 0x8B9AF0A3E71F042FL, + 0x6AA56E88BD380564L, + 0xAACE57113E681A0FL, + 0x19F81514AFA9A22DL, + 0x80DABA3D62BEAC79L, + 0x715210412CABBF46L, + 0xD8FA0B9E9D6AA93FL, + 0x6C2FC5A4109FD3A2L, + 0x5B3E60EEB51DDCD8L, + 0x0A7C717017756FE7L, + 0xA73773805CA31934L, + 0x4DBD6BB7A31E85FDL, + 0x24F619D3D5BC2DB4L, + 0x3E4AF35A1678D636L, + 0x84A1A8DF8D609239L, + 0x359C862CD3BE4FCDL, + 0xCF3A39F5C27DC125L, + 0xC0FF62F8FD5F4C77L, + 0x5E9F2493DDAA166CL, + 0x17424152BE1CA266L, + 0xA78AFA5AB4BBE0CDL, + 0x7BFB2E2CEF118346L, + 0x647C3E0FF3E3D241L, + 0x0352E4055C13242EL, + 0x6F42FC70EB660E38L, + 0x0BEBAD4FABF523BAL, + 0x9269F4214414D61DL, + 0x1CA8760277E6006CL, + 0x7BAD25A859D87B5DL, + 0xAD645ADCF7414F1DL, + 0xB07F517E88D7AFB3L, + 0xB321C06FB5FFAB5CL, + 0xD50F162A1EFDD844L, + 0x1DFD3D1924FBE319L, + 0xDFAEAB2F09EF7E78L, + 0xA7603B5AF07A0B1EL, + 0x41CD044C0E5A4EE3L, + 0xF64D2F86E813BF33L, + 0xFF9FDB99305EB06AL }; + + // Reference hashes when using {@link Math::multiplyHigh} in the mux64 step. 
+ long[] referenceMultiplyHigh = { + 0L, + 0xCE510B7405E0A2CAL, + 0xC0A2DA74A8271FCBL, + 0x1C549C06FAF4D023L, + 0x084CDA0ED41CD2D4L, + 0xD05BA7AA9FEECE5BL, + 0x7D6128AB2CCC4EB1L, + 0x62332FA6EC1B50AAL, + 0x1B66C81767870EF2L, + 0xEC6B92A37AED73B8L, + 0x1712987232EF4ED3L, + 0xAA503A04AE2450B5L, + 0x15D25DE445730A6CL, + 0xAB87E38AA8D21746L, + 0x18CAE735BBF62D15L, + 0x0D56DFF9914CA656L, + 0xCB4F5859A9AE5B52L, + 0xEE97003F7B1283E1L, + 0x50CFB2AF0F54BA6DL, + 0x570B4D6AE4C67814L, + 0x1ED59274A97497EBL, + 0x8608D03D165C59BFL, + 0x6CBE0E537BE04C02L, + 0xD4C8FCFD4179A874L, + 0xFB4E677D876118A1L, + 0x6B1A96F1B4765D79L, + 0x1075B9B89BDFE5F8L, + 0x02771D08F2891CB1L, + 0x4BB8E16FF410F19EL, + 0x3EB7849C0DFAF566L, + 0x173B09359DE422CFL, + 0xFE212C6DB7474306L, + 0xA74E7C2D632664EFL, + 0x56ECDED6546F0914L, + 0x08DEF866EF20A94BL, + 0x7D0BAC64606521F1L, + 0xCA6BA9817A357FA9L, + 0x0873B834A6E2AAE4L, + 0x45EE02D6DCF8992EL, + 0x3EA060225B3E1C1FL, + 0x24DBB6D02D5CC531L, + 0xE5E91A7340BF9382L, + 0x28975F86E2E2177FL, + 0x80E48374A6B42E85L, + 0xDF40392265BB4A66L, + 0x43750475A48C7023L, + 0x5648BD3E391C01D3L, + 0x9BE9E11AD1A6C369L, + 0x2E079CB8C1A11F50L, + 0xB2D538403F1020F1L, + 0x297518A4EF6AF5F1L, + 0xA8CE1B90167A6F8BL, + 0xB926B2FA50541BA9L, + 0xC46A2D3BD6925A35L, + 0x3071BC8E6C400487L, + 0x300D3885894BA47FL, + 0x840BFF3BEB7EEADDL, + 0xDC9E04DF744BDC0CL, + 0xBE01CF6841412C77L, + 0x6C55B2DC74B816A1L, + 0x4D4C63128A344F82L, + 0xC6227497E100B463L, + 0x53C9987705EA71C0L, + 0x3E355394668C3559L, + 0x05984B7D358B107AL, + 0x4D32FA1D79002A57L, + 0x910B0DAD1440EC24L, + 0x025BDE6A7BEBF320L, + 0x0D33817EF345D999L, + 0xBA0DE64B3F4DB34AL, + 0x54666461D0EB4FD7L, + 0x746ECFA92D1CAF81L, + 0x6E6A774ACD266DF2L, + 0x1A86161AE8E82A85L, + 0xFFF7C351A4CEC13DL, + 0xFFF05844F57498B8L, + 0x8DB71789127C6C13L, + 0x4A52ACF805F370ABL, + 0xFE13F90A1ACFBD58L, + 0x615730E301ED12E2L, + 0x1A2D4AA43B6C0103L }; + + long[] reference = hasUnsignedMultiplyHigh() ? 
referenceUnsignedMultiplyHigh : referenceMultiplyHigh; + + int offset = 0; + assertEquals(reference[offset++], T1ha1.hash(null, 0, 0, 0L)); // empty-zero + assertEquals(reference[offset++], T1ha1.hash(null, 0, 0, ~0L)); // empty-all1 + assertEquals(reference[offset++], T1ha1.hash(testPattern, 0, 64, 0L)); // bin64-zero + + long seed = 1; + for (int i = 1; i < 64; i++) { + assertEquals(reference[offset++], T1ha1.hash(testPattern, 0, i, seed)); // bin%i-1p%i + seed <<= 1; + } + + seed = ~0L; + for (int i = 1; i <= 7; i++) { + seed <<= 1; + assertEquals(reference[offset++], T1ha1.hash(testPattern, i, 64 - i, seed)); // align%i_F%i + } + + byte[] testPatternLong = new byte[512]; + for (int i = 0; i < testPatternLong.length; i++) { + testPatternLong[i] = (byte) i; + } + for (int i = 0; i <= 7; i++) { + assertEquals(reference[offset++], T1ha1.hash(testPatternLong, i, 128 + i * 17, seed)); // long-%05i + } + } + + @Override + public byte[] hash(byte[] input) { + long hash = T1ha1.hash(input, 0, input.length); + LONG_HANDLE.set(scratch, 0, hash); + return scratch; + } + + @Override + public int outputBits() { + return 64; + } + + private static boolean hasUnsignedMultiplyHigh() { + try { + MethodHandles.publicLookup() + .findStatic(Math.class, "unsignedMultiplyHigh", MethodType.methodType(long.class, long.class, long.class)); + return true; + } catch (NoSuchMethodException e) { + return false; + } catch (IllegalAccessException e) { + throw new RuntimeException(e); + } + } +} diff --git a/libs/compress/src/main/java/org/opensearch/compress/ZstdCompressor.java b/libs/compress/src/main/java/org/opensearch/compress/ZstdCompressor.java index 01afc368fb120..e2a740f72be93 100644 --- a/libs/compress/src/main/java/org/opensearch/compress/ZstdCompressor.java +++ b/libs/compress/src/main/java/org/opensearch/compress/ZstdCompressor.java @@ -30,10 +30,13 @@ * @opensearch.experimental - class methods might change */ public class ZstdCompressor implements Compressor { - // An arbitrary header that we use to identify compressed streams - // It needs to be different from other compressors and to not be specific - // enough so that no stream starting with these bytes could be detected as - // a XContent + + /** + * An arbitrary header that we use to identify compressed streams + * It needs to be different from other compressors and to not be specific + * enough so that no stream starting with these bytes could be detected as + * a XContent + * */ private static final byte[] HEADER = new byte[] { 'Z', 'S', 'T', 'D', '\0' }; /** @@ -44,10 +47,20 @@ public class ZstdCompressor implements Compressor { @PublicApi(since = "2.10.0") public static final String NAME = "ZSTD"; + /** + * The compression level for {@link ZstdOutputStreamNoFinalizer} + */ private static final int LEVEL = 3; + /** The buffer size for {@link BufferedInputStream} and {@link BufferedOutputStream} + */ private static final int BUFFER_SIZE = 4096; + /** + * Compares the given bytes with the {@link ZstdCompressor#HEADER} of a compressed stream + * @param bytes the bytes to compare to ({@link ZstdCompressor#HEADER}) + * @return true if the bytes are the {@link ZstdCompressor#HEADER}, false otherwise + */ @Override public boolean isCompressed(BytesReference bytes) { if (bytes.length() < HEADER.length) { @@ -61,11 +74,22 @@ public boolean isCompressed(BytesReference bytes) { return true; } + /** + * Returns the length of the {@link ZstdCompressor#HEADER} + * @return the {@link ZstdCompressor#HEADER} length + */ @Override public int headerLength() { 
return HEADER.length; } + /** + * Returns a new {@link ZstdInputStreamNoFinalizer} from the given compressed {@link InputStream} + * @param in the compressed {@link InputStream} + * @return a new {@link ZstdInputStreamNoFinalizer} from the given compressed {@link InputStream} + * @throws IOException if an I/O error occurs + * @throws IllegalArgumentException if the input stream is not compressed with ZSTD + */ @Override public InputStream threadLocalInputStream(InputStream in) throws IOException { final byte[] header = in.readNBytes(HEADER.length); @@ -75,17 +99,36 @@ public InputStream threadLocalInputStream(InputStream in) throws IOException { return new ZstdInputStreamNoFinalizer(new BufferedInputStream(in, BUFFER_SIZE), RecyclingBufferPool.INSTANCE); } + /** + * Returns a new {@link ZstdOutputStreamNoFinalizer} from the given {@link OutputStream} + * @param out the {@link OutputStream} + * @return a new {@link ZstdOutputStreamNoFinalizer} from the given {@link OutputStream} + * @throws IOException if an I/O error occurs + */ @Override public OutputStream threadLocalOutputStream(OutputStream out) throws IOException { out.write(HEADER); return new ZstdOutputStreamNoFinalizer(new BufferedOutputStream(out, BUFFER_SIZE), RecyclingBufferPool.INSTANCE, LEVEL); } + /** + * Always throws an {@link UnsupportedOperationException} as ZSTD compression is supported only for snapshotting + * @param bytesReference a reference to the bytes to uncompress + * @return always throws an exception + * @throws UnsupportedOperationException if the method is called + * @throws IOException is never thrown + */ @Override public BytesReference uncompress(BytesReference bytesReference) throws IOException { throw new UnsupportedOperationException("ZSTD compression is supported only for snapshotting"); } + /** + * Always throws an {@link UnsupportedOperationException} as ZSTD compression is supported only for snapshotting + * @param bytesReference a reference to the bytes to compress + * @return always throws an exception + * @throws UnsupportedOperationException if the method is called + */ @Override public BytesReference compress(BytesReference bytesReference) throws IOException { throw new UnsupportedOperationException("ZSTD compression is supported only for snapshotting"); diff --git a/libs/compress/src/main/java/org/opensearch/compress/spi/CompressionProvider.java b/libs/compress/src/main/java/org/opensearch/compress/spi/CompressionProvider.java index 58bf24a210bae..f0c6969377d78 100644 --- a/libs/compress/src/main/java/org/opensearch/compress/spi/CompressionProvider.java +++ b/libs/compress/src/main/java/org/opensearch/compress/spi/CompressionProvider.java @@ -23,7 +23,10 @@ */ public class CompressionProvider implements CompressorProvider { - /** Returns the concrete {@link Compressor}s provided by the compress library */ + /** + * Returns the concrete {@link Compressor}s provided by the compress library + * @return a list of {@link Compressor}s + * */ @SuppressWarnings({ "unchecked", "rawtypes" }) @Override public List> getCompressors() { diff --git a/libs/core/licenses/log4j-api-2.20.0.jar.sha1 b/libs/core/licenses/log4j-api-2.20.0.jar.sha1 deleted file mode 100644 index 37154d9861ac0..0000000000000 --- a/libs/core/licenses/log4j-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fe6082e660daf07c689a89c94dc0f49c26b44bb \ No newline at end of file diff --git a/libs/core/licenses/log4j-api-2.21.0.jar.sha1 b/libs/core/licenses/log4j-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..51446052594aa --- 
/dev/null +++ b/libs/core/licenses/log4j-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +760192f2b69eacf4a4afc78e5a1d7a8de054fcbd \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.7.0.jar.sha1 b/libs/core/licenses/lucene-core-9.7.0.jar.sha1 deleted file mode 100644 index 2b0f77275c0ab..0000000000000 --- a/libs/core/licenses/lucene-core-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad391210ffd806931334be9670a35af00c56f959 \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.8.0.jar.sha1 b/libs/core/licenses/lucene-core-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..f9a3e2f3cbee6 --- /dev/null +++ b/libs/core/licenses/lucene-core-9.8.0.jar.sha1 @@ -0,0 +1 @@ +5e8421c5f8573bcf22e9265fc7e19469545a775a \ No newline at end of file diff --git a/libs/core/src/main/java/org/opensearch/Build.java b/libs/core/src/main/java/org/opensearch/Build.java index 67a50a8a31a0e..b5d67f5501725 100644 --- a/libs/core/src/main/java/org/opensearch/Build.java +++ b/libs/core/src/main/java/org/opensearch/Build.java @@ -216,7 +216,7 @@ public String getDistribution() { /** * Get the version as considered at build time - * + *
<p>
* Offers a way to get the fully qualified version as configured by the build. * This will be the same as {@link Version} for production releases, but may include on of the qualifier ( e.x alpha1 ) * or -SNAPSHOT for others. diff --git a/libs/core/src/main/java/org/opensearch/LegacyESVersion.java b/libs/core/src/main/java/org/opensearch/LegacyESVersion.java index dafd071ef935a..9f288d06e82f1 100644 --- a/libs/core/src/main/java/org/opensearch/LegacyESVersion.java +++ b/libs/core/src/main/java/org/opensearch/LegacyESVersion.java @@ -40,7 +40,7 @@ /** * The Contents of this file were originally moved from {@link Version}. - * + *
<p>
* This class keeps all the supported OpenSearch predecessor versions for * backward compatibility purpose. * diff --git a/libs/core/src/main/java/org/opensearch/OpenSearchException.java b/libs/core/src/main/java/org/opensearch/OpenSearchException.java index 5bad711a15032..cce86b452f698 100644 --- a/libs/core/src/main/java/org/opensearch/OpenSearchException.java +++ b/libs/core/src/main/java/org/opensearch/OpenSearchException.java @@ -168,7 +168,7 @@ public OpenSearchException(Throwable cause) { /** * Construct a OpenSearchException with the specified detail message. - * + *
<p>
* The message can be parameterized using {} as placeholders for the given * arguments * @@ -182,7 +182,7 @@ public OpenSearchException(String msg, Object... args) { /** * Construct a OpenSearchException with the specified detail message * and nested exception. - * + *
<p>
* The message can be parameterized using {} as placeholders for the given * arguments * @@ -587,7 +587,7 @@ public static OpenSearchException innerFromXContent(XContentParser parser, boole * Static toXContent helper method that renders {@link OpenSearchException} or {@link Throwable} instances * as XContent, delegating the rendering to {@link OpenSearchException#toXContent(XContentBuilder, ToXContent.Params)} * or {@link #innerToXContent(XContentBuilder, ToXContent.Params, Throwable, String, String, Map, Map, Throwable)}. - * + *
<p>
* This method is usually used when the {@link Throwable} is rendered as a part of another XContent object, and its result can * be parsed back using the {@code OpenSearchException.fromXContent(XContentParser)} method. */ @@ -606,7 +606,7 @@ public static void generateThrowableXContent(XContentBuilder builder, ToXContent * depends on the value of the "detailed" parameter: when it's false only a simple message based on the type and message of the * exception is rendered. When it's true all detail are provided including guesses root causes, cause and potentially stack * trace. - * + *
<p>
* This method is usually used when the {@link Exception} is rendered as a full XContent object, and its output can be parsed * by the {@code #OpenSearchException.failureFromXContent(XContentParser)} method. */ diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 3ec362d4ba9ab..cc2214c16da3b 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -33,6 +33,7 @@ package org.opensearch; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -50,6 +51,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Version implements Comparable, ToXContentFragment { /* * The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator AA @@ -91,6 +93,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_3_11 = new Version(1031199, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_12 = new Version(1031299, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_13 = new Version(1031399, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_1_3_14 = new Version(1031499, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_0_1 = new Version(2000199, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_0_2 = new Version(2000299, org.apache.lucene.util.Version.LUCENE_9_1_0); @@ -115,7 +118,11 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_9_0 = new Version(2090099, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_9_1 = new Version(2090199, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_10_0 = new Version(2100099, org.apache.lucene.util.Version.LUCENE_9_7_0); - public static final Version CURRENT = V_2_10_0; + public static final Version V_2_10_1 = new Version(2100199, org.apache.lucene.util.Version.LUCENE_9_7_0); + public static final Version V_2_11_0 = new Version(2110099, org.apache.lucene.util.Version.LUCENE_9_7_0); + public static final Version V_2_11_1 = new Version(2110199, org.apache.lucene.util.Version.LUCENE_9_7_0); + public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_8_0); + public static final Version CURRENT = V_2_12_0; public static Version fromId(int id) { final Version known = LegacyESVersion.idToVersion.get(id); diff --git a/libs/core/src/main/java/org/opensearch/core/ParseField.java b/libs/core/src/main/java/org/opensearch/core/ParseField.java index c2ea39efd82e1..6c04ec0a96361 100644 --- a/libs/core/src/main/java/org/opensearch/core/ParseField.java +++ b/libs/core/src/main/java/org/opensearch/core/ParseField.java @@ -31,6 +31,7 @@ package org.opensearch.core; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.XContentLocation; @@ -43,7 +44,11 @@ /** * Holds a field that can be found in a request while parsing and its different * variants, which may be deprecated. 
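A ParseField bundles a field's canonical name with its deprecated variants; a short usage sketch (illustration only, field names invented):

    import org.opensearch.core.ParseField;
    import org.opensearch.core.xcontent.DeprecationHandler;

    public final class ParseFieldSketch {
        public static void main(String[] args) {
            // canonical name plus one deprecated variant
            ParseField field = new ParseField("unit", "legacy_unit");
            System.out.println(field.match("unit", DeprecationHandler.THROW_UNSUPPORTED_OPERATION)); // true
            System.out.println(field.match("size", DeprecationHandler.THROW_UNSUPPORTED_OPERATION)); // false
            // matching "legacy_unit" also returns true but is routed through the
            // DeprecationHandler so callers can warn on, or reject, the old name
        }
    }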
+ * + * @opensearch.api + * */ +@PublicApi(since = "1.0.0") public class ParseField { private final String name; private final String[] deprecatedNames; diff --git a/libs/core/src/main/java/org/opensearch/core/action/ActionListener.java b/libs/core/src/main/java/org/opensearch/core/action/ActionListener.java index dbebf750c1183..48c8125f0f71f 100644 --- a/libs/core/src/main/java/org/opensearch/core/action/ActionListener.java +++ b/libs/core/src/main/java/org/opensearch/core/action/ActionListener.java @@ -37,6 +37,7 @@ import org.opensearch.common.CheckedFunction; import org.opensearch.common.CheckedRunnable; import org.opensearch.common.CheckedSupplier; +import org.opensearch.common.annotation.PublicApi; import java.util.ArrayList; import java.util.List; @@ -48,6 +49,7 @@ * * @opensearch.internal */ +@PublicApi(since = "1.0.0") public interface ActionListener { /** * Handle action response. This response may constitute a failure or a @@ -152,9 +154,9 @@ static ActionListener wrap(Runnable runnable) { /** * Creates a listener that wraps another listener, mapping response values via the given mapping function and passing along * exceptions to the delegate. - * + *
<p>
* Notice that it is considered a bug if the listener's onResponse or onFailure fails. onResponse failures will not call onFailure. - * + *
<p>
* If the function fails, the listener's onFailure handler will be called. The principle is that the mapped listener will handle * exceptions from the mapping function {@code fn} but it is the responsibility of {@code delegate} to handle its own exceptions * inside `onResponse` and `onFailure`. @@ -332,7 +334,7 @@ protected void innerOnFailure(Exception e) { /** * Completes the given listener with the result from the provided supplier accordingly. * This method is mainly used to complete a listener with a block of synchronous code. - * + *
<p>
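The two helpers documented in this hunk compose naturally; a minimal sketch (illustration only, listener bodies invented):

    import org.opensearch.core.action.ActionListener;

    public final class ListenerSketch {
        public static void main(String[] args) {
            ActionListener<String> print = ActionListener.wrap(
                r -> System.out.println("got " + r),
                e -> System.err.println("failed: " + e));

            // map(...) adapts the response type; a throwing mapper fails the delegate
            ActionListener<Integer> asLength = ActionListener.map(print, i -> "length=" + i);

            // completeWith(...) runs the synchronous block and routes the result
            // (or exception) into the listener: prints "got length=5"
            ActionListener.completeWith(asLength, () -> "hello".length());
        }
    }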
* If the supplier fails, the listener's onFailure handler will be called. * It is the responsibility of {@code delegate} to handle its own exceptions inside `onResponse` and `onFailure`. */ diff --git a/libs/core/src/main/java/org/opensearch/core/common/Strings.java b/libs/core/src/main/java/org/opensearch/core/common/Strings.java index 13468118abd38..a75ecd6c01043 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/Strings.java +++ b/libs/core/src/main/java/org/opensearch/core/common/Strings.java @@ -38,7 +38,7 @@ /** * String utility class. - * + *
<p>
* TODO replace Strings in :server * * @opensearch.internal diff --git a/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java b/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java index 0f75f763d21c1..846950ff17c63 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java +++ b/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreaker.java @@ -71,17 +71,23 @@ public interface CircuitBreaker { /** * The type of breaker - * + * can be {@link #MEMORY}, {@link #PARENT}, or {@link #NOOP} * @opensearch.internal */ enum Type { - // A regular or ChildMemoryCircuitBreaker + /** A regular or ChildMemoryCircuitBreaker */ MEMORY, - // A special parent-type for the hierarchy breaker service + /** A special parent-type for the hierarchy breaker service */ PARENT, - // A breaker where every action is a noop, it never breaks + /** A breaker where every action is a noop, it never breaks */ NOOP; + /** + * Converts string (case-insensitive) to breaker {@link Type} + * @param value "noop", "parent", or "memory" (case-insensitive) + * @return the breaker {@link Type} + * @throws IllegalArgumentException if value is not "noop", "parent", or "memory" + */ public static Type parseValue(String value) { switch (value.toLowerCase(Locale.ROOT)) { case "noop": @@ -98,13 +104,13 @@ public static Type parseValue(String value) { /** * The breaker durability - * + * can be {@link #TRANSIENT} or {@link #PERMANENT} * @opensearch.internal */ enum Durability { - // The condition that tripped the circuit breaker fixes itself eventually. + /** The condition that tripped the circuit breaker fixes itself eventually. */ TRANSIENT, - // The condition that tripped the circuit breaker requires manual intervention. + /** The condition that tripped the circuit breaker requires manual intervention. */ PERMANENT } @@ -120,11 +126,14 @@ enum Durability { * @param bytes number of bytes to add * @param label string label describing the bytes being added * @return the number of "used" bytes for the circuit breaker + * @throws CircuitBreakingException if the breaker tripped */ double addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException; /** * Adjust the circuit breaker without tripping + * @param bytes number of bytes to add + * @return the number of "used" bytes for the circuit breaker */ long addWithoutBreaking(long bytes); @@ -154,7 +163,10 @@ enum Durability { String getName(); /** - * @return whether a tripped circuit breaker will reset itself (transient) or requires manual intervention (permanent). + * Returns the {@link Durability} of this breaker + * @return whether a tripped circuit breaker will + * reset itself ({@link Durability#TRANSIENT}) + * or requires manual intervention ({@link Durability#PERMANENT}). 
*/ Durability getDurability(); diff --git a/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreakingException.java b/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreakingException.java index 8a345a0f78efe..52a34a103e775 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreakingException.java +++ b/libs/core/src/main/java/org/opensearch/core/common/breaker/CircuitBreakingException.java @@ -47,8 +47,11 @@ */ public class CircuitBreakingException extends OpenSearchException { + /** The number of bytes wanted */ private final long bytesWanted; + /** The circuit breaker limit */ private final long byteLimit; + /** The {@link CircuitBreaker.Durability} of the circuit breaker */ private final CircuitBreaker.Durability durability; public CircuitBreakingException(StreamInput in) throws IOException { @@ -95,6 +98,7 @@ public CircuitBreaker.Durability getDurability() { return durability; } + /** Always returns {@link RestStatus#TOO_MANY_REQUESTS} */ @Override public RestStatus status() { return RestStatus.TOO_MANY_REQUESTS; diff --git a/libs/core/src/main/java/org/opensearch/core/common/breaker/NoopCircuitBreaker.java b/libs/core/src/main/java/org/opensearch/core/common/breaker/NoopCircuitBreaker.java index 86a0a7ccb96fd..17b9fefd27c99 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/breaker/NoopCircuitBreaker.java +++ b/libs/core/src/main/java/org/opensearch/core/common/breaker/NoopCircuitBreaker.java @@ -33,65 +33,120 @@ package org.opensearch.core.common.breaker; /** - * A CircuitBreaker that doesn't increment or adjust, and all operations are - * basically noops - * + * A {@link CircuitBreaker} that doesn't increment or adjust, and all operations are + * basically noops. + * It never trips, limit is always -1, always returns 0 for all metrics. 
* @opensearch.internal */ public class NoopCircuitBreaker implements CircuitBreaker { - public static final int LIMIT = -1; + /** The limit of this breaker is always -1 */ + public static final int LIMIT = -1; + /** Name of this breaker */ private final String name; + /** + * Creates a new NoopCircuitBreaker (that never trip) with the given name + * @param name the name of this breaker + */ public NoopCircuitBreaker(String name) { this.name = name; } + /** + * This is a noop, a noop breaker never trip + * @param fieldName name of this noop breaker + * @param bytesNeeded bytes needed + */ @Override public void circuitBreak(String fieldName, long bytesNeeded) { // noop } + /** + * This is a noop, always return 0 and never throw/trip + * @param bytes number of bytes to add + * @param label string label describing the bytes being added + * @return always return 0 + * @throws CircuitBreakingException never thrown + */ @Override public double addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException { return 0; } + /** + * This is a noop, nothing is added, always return 0 + * @param bytes number of bytes to add (ignored) + * @return always return 0 + */ @Override public long addWithoutBreaking(long bytes) { return 0; } + /** + * This is a noop, always return 0 + * @return always return 0 + */ @Override public long getUsed() { return 0; } + /** + * A noop breaker have a constant limit of -1 + * @return always return -1 + */ @Override public long getLimit() { return LIMIT; } + /** + * A noop breaker have no overhead, always return 0 + * @return always return 0 + */ @Override public double getOverhead() { return 0; } + /** + * A noop breaker never trip, always return 0 + * @return always return 0 + */ @Override public long getTrippedCount() { return 0; } + /** + * return the name of this breaker + * @return the name of this breaker + */ @Override public String getName() { return this.name; } + /** + * A noop breaker {@link Durability} is always {@link Durability#PERMANENT} + * @return always return {@link Durability#PERMANENT } + */ @Override public Durability getDurability() { return Durability.PERMANENT; } + /** + * Limit and overhead are constant for a noop breaker. + * this is a noop. 
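Taken together, these javadocs describe a breaker with fully degenerate behavior; a quick sketch of what a caller observes, using only the methods shown in this hunk:

    import org.opensearch.core.common.breaker.CircuitBreaker;
    import org.opensearch.core.common.breaker.NoopCircuitBreaker;

    public final class NoopBreakerSketch {
        public static void main(String[] args) {
            CircuitBreaker breaker = new NoopCircuitBreaker("request");
            System.out.println(breaker.addWithoutBreaking(1024));                      // 0
            System.out.println(breaker.addEstimateBytesAndMaybeBreak(1 << 20, "big")); // 0.0, never throws
            System.out.println(breaker.getLimit());                                    // -1
            System.out.println(breaker.getDurability());                               // PERMANENT
        }
    }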
+ * @param limit the desired limit (ignored) + * @param overhead the desired overhead (ignored) + */ @Override - public void setLimitAndOverhead(long limit, double overhead) {} + public void setLimitAndOverhead(long limit, double overhead) { + // noop + } } diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java index e054776d67fdc..8c1efcd00c24e 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/AbstractBytesReference.java @@ -49,7 +49,8 @@ */ public abstract class AbstractBytesReference implements BytesReference { - private Integer hash = null; // we cache the hash of this reference since it can be quite costly to re-calculated it + /** we cache the hash of this reference since it can be quite costly to re-calculated it */ + private Integer hash = null; private static final int MAX_UTF16_LENGTH = Integer.MAX_VALUE >> 1; @Override diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java index 1548c7d4c2e88..bb26e4e8a8675 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/BytesReference.java @@ -35,6 +35,7 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.BytesStream; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.util.ByteArray; @@ -50,8 +51,9 @@ /** * A reference to bytes. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface BytesReference extends Comparable, ToXContentFragment { /** diff --git a/libs/core/src/main/java/org/opensearch/core/common/bytes/CompositeBytesReference.java b/libs/core/src/main/java/org/opensearch/core/common/bytes/CompositeBytesReference.java index 53915a3da824c..1a48abee2dbf8 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/bytes/CompositeBytesReference.java +++ b/libs/core/src/main/java/org/opensearch/core/common/bytes/CompositeBytesReference.java @@ -45,7 +45,7 @@ /** * A composite {@link BytesReference} that allows joining multiple bytes references * into one without copying. - * + *
<p>
* Note, {@link #toBytesRef()} will materialize all pages in this BytesReference. * * @opensearch.internal diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java index a50d1c165ed72..30c84708728ef 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/BytesStreamInput.java @@ -17,7 +17,7 @@ * {@link StreamInput} version of Lucene's {@link org.apache.lucene.store.ByteArrayDataInput} * This is used as a replacement of Lucene ByteArrayDataInput for abstracting byte order changes * in Lucene's API - * + *
<p>
* Attribution given to apache lucene project under ALv2: * * Licensed to the Apache Software Foundation (ASF) under one or more diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/NamedWriteableRegistry.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/NamedWriteableRegistry.java index ec707f147cade..123b52eb92876 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/NamedWriteableRegistry.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/NamedWriteableRegistry.java @@ -32,6 +32,8 @@ package org.opensearch.core.common.io.stream; +import org.opensearch.common.annotation.PublicApi; + import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -41,19 +43,21 @@ /** * A registry for {@link Writeable.Reader} readers of {@link NamedWriteable}. - * + *
<p>
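A registration and lookup sketch, keyed by category class plus name as the next sentence spells out (Animal and its reader are hypothetical types invented for illustration):

    import java.util.List;
    import org.opensearch.core.common.io.stream.NamedWriteable;
    import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
    import org.opensearch.core.common.io.stream.Writeable;

    public final class RegistrySketch {
        interface Animal extends NamedWriteable {}

        public static void main(String[] args) {
            NamedWriteableRegistry registry = new NamedWriteableRegistry(
                List.of(new NamedWriteableRegistry.Entry(Animal.class, "dog", in -> null /* deserialize a Dog here */)));
            Writeable.Reader<? extends Animal> reader = registry.getReader(Animal.class, "dog");
            System.out.println(reader != null); // true
        }
    }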
* The registration is keyed by the combination of the category class of {@link NamedWriteable}, and a name unique * to that category. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NamedWriteableRegistry { /** * An entry in the registry, made up of a category class and name, and a reader for that category class. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Entry { /** The superclass of a {@link NamedWriteable} which will be read by {@link #reader}. */ diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java index b6d4fcf21ca66..2942be9571fcb 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java @@ -46,6 +46,7 @@ import org.opensearch.Version; import org.opensearch.common.CharArrays; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesArray; @@ -95,7 +96,7 @@ /** * A stream from this node to another node. Technically, it can also be streamed to a byte array but that is mostly for testing. - * + *
<p>
* This class's methods are optimized so you can put the methods that read and write a class next to each other and you can scan them * visually for differences. That means that most variables should be read and written in a single line so even large objects fit both * reading and writing on the screen. It also means that the methods on this class are named very similarly to {@link StreamOutput}. Finally @@ -104,8 +105,9 @@ * lists, either by storing {@code List}s internally or just converting to and from a {@code List} when calling. This comment is repeated * on {@link StreamInput}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class StreamInput extends InputStream { private Version version = Version.CURRENT; @@ -1124,7 +1126,7 @@ public C readNamedWriteable(@SuppressWarnings("unused * the corresponding entry in the registry by name, so that the proper object can be read and returned. * Default implementation throws {@link UnsupportedOperationException} as StreamInput doesn't hold a registry. * Use {@link FilterInputStream} instead which wraps a stream and supports a {@link NamedWriteableRegistry} too. - * + *
<p>
* Prefer {@link StreamInput#readNamedWriteable(Class)} and {@link StreamOutput#writeNamedWriteable(NamedWriteable)} unless you * have a compelling reason to use this method instead. */ diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java index a61278c0cc4de..2d69e1c686df3 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamOutput.java @@ -45,6 +45,7 @@ import org.opensearch.Version; import org.opensearch.common.CharArrays; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; @@ -87,7 +88,7 @@ /** * A stream from another node to this node. Technically, it can also be streamed from a byte array but that is mostly for testing. - * + *
<p>
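Both stream classes carry the same guidance, spelled out in the paragraph that follows: keep a class's reads and writes mirrored line for line. A minimal Writeable in that style (illustration only):

    import java.io.IOException;
    import org.opensearch.core.common.io.stream.StreamInput;
    import org.opensearch.core.common.io.stream.StreamOutput;
    import org.opensearch.core.common.io.stream.Writeable;

    public final class Point implements Writeable {
        private final int x;
        private final int y;

        public Point(StreamInput in) throws IOException { // reads mirror the writes below, in order
            this.x = in.readVInt();
            this.y = in.readVInt();
        }

        public Point(int x, int y) {
            this.x = x;
            this.y = y;
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeVInt(x);
            out.writeVInt(y);
        }
    }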
* This class's methods are optimized so you can put the methods that read and write a class next to each other and you can scan them * visually for differences. That means that most variables should be read and written in a single line so even large objects fit both * reading and writing on the screen. It also means that the methods on this class are named very similarly to {@link StreamInput}. Finally @@ -96,8 +97,9 @@ * lists, either by storing {@code List}s internally or just converting to and from a {@code List} when calling. This comment is repeated * on {@link StreamInput}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class StreamOutput extends OutputStream { private static final int MAX_NESTED_EXCEPTION_LEVEL = 100; diff --git a/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java b/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java index bca919e12ea7e..0560fc5c7b7e9 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java +++ b/libs/core/src/main/java/org/opensearch/core/common/logging/LoggerMessageFormat.java @@ -37,6 +37,10 @@ /** * Format string for OpenSearch log messages. + *
<p>
+ * This class is almost a copy of {@code org.slf4j.helpers.MessageFormatter}
+ * <p>
+ * The original code is licensed under the MIT License and is available at : + * MessageFormatter.java * * @opensearch.internal */ @@ -51,6 +55,17 @@ public static String format(final String messagePattern, final Object... argArra return format(null, messagePattern, argArray); } + /** + * (this is almost a copy of {@code org.slf4j.helpers.MessageFormatter.arrayFormat}) + * + * @param prefix the prefix to prepend to the formatted message (can be null) + * @param messagePattern the message pattern which will be parsed and formatted + * @param argArray an array of arguments to be substituted in place of formatting anchors + * @return null if messagePattern is null
<br>
+ * messagePattern if argArray is (null or empty) and prefix is null
<br>
+ * prefix + messagePattern if argArray is (null or empty) and prefix is not null
<br>
+ * formatted message otherwise (even if prefix is null) + */ public static String format(final String prefix, final String messagePattern, final Object... argArray) { if (messagePattern == null) { return null; @@ -110,6 +125,13 @@ public static String format(final String prefix, final String messagePattern, fi return sbuf.toString(); } + /** + * Checks if (delimterStartIndex - 1) in messagePattern is an escape character. + * @param messagePattern the message pattern + * @param delimiterStartIndex the index of the character to check + * @return true if there is an escape char before the character at delimiterStartIndex.
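For illustration, the format(...) return cases documented above plus the escape handling that isEscapedDelimiter supports, with expected output in comments (assuming slf4j-style {} anchors):

    import org.opensearch.core.common.logging.LoggerMessageFormat;

    public final class FormatSketch {
        public static void main(String[] args) {
            System.out.println(LoggerMessageFormat.format(null, "took {} ms", 42));          // took 42 ms
            System.out.println(LoggerMessageFormat.format("[shard 0] ", "took {} ms", 42));  // [shard 0] took 42 ms
            // an escaped delimiter prints literally and does not consume an argument
            System.out.println(LoggerMessageFormat.format(null, "literal \\{} then {}", 7)); // literal {} then 7
        }
    }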
<br>
+ * Always returns false if delimiterStartIndex == 0 (edge case) + */ static boolean isEscapedDelimiter(String messagePattern, int delimiterStartIndex) { if (delimiterStartIndex == 0) { @@ -123,6 +145,13 @@ static boolean isEscapedDelimiter(String messagePattern, int delimiterStartIndex } } + /** + * Checks if (delimterStartIndex - 2) in messagePattern is an escape character. + * @param messagePattern the message pattern + * @param delimiterStartIndex the index of the character to check + * @return true if (delimterStartIndex - 2) in messagePattern is an escape character. + * Always returns false if delimiterStartIndex is less than 2 (edge case) + */ static boolean isDoubleEscaped(String messagePattern, int delimiterStartIndex) { if (delimiterStartIndex >= 2 && messagePattern.charAt(delimiterStartIndex - 2) == ESCAPE_CHAR) { return true; diff --git a/libs/core/src/main/java/org/opensearch/core/common/settings/SecureString.java b/libs/core/src/main/java/org/opensearch/core/common/settings/SecureString.java index f5529bcebc82f..45ee72f558724 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/settings/SecureString.java +++ b/libs/core/src/main/java/org/opensearch/core/common/settings/SecureString.java @@ -32,6 +32,8 @@ package org.opensearch.core.common.settings; +import org.opensearch.common.annotation.PublicApi; + import java.io.Closeable; import java.util.Arrays; import java.util.Objects; @@ -39,15 +41,16 @@ /** * A String implementations which allows clearing the underlying char array. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class SecureString implements CharSequence, Closeable { private char[] chars; /** * Constructs a new SecureString which controls the passed in char array. - * + *
<p>
* Note: When this instance is closed, the array will be zeroed out. */ public SecureString(char[] chars) { @@ -56,7 +59,7 @@ public SecureString(char[] chars) { /** * Constructs a new SecureString from an existing String. - * + *
<p>
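A sketch of the ownership contract of the char[] constructor above, in contrast with the String constructor described here (illustration only):

    import org.opensearch.core.common.settings.SecureString;

    public final class SecureStringSketch {
        public static void main(String[] args) {
            char[] password = { 's', '3', 'c', 'r', '3', 't' };
            try (SecureString secure = new SecureString(password)) {
                System.out.println(secure.length()); // usable while open
            }
            System.out.println((int) password[0]); // 0 (the array was zeroed on close)
        }
    }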
* NOTE: This is not actually secure, since the provided String cannot be deallocated, but * this constructor allows for easy compatibility between new and old apis. * diff --git a/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java b/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java index 1a853877ed0b9..551504ed6f719 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java +++ b/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java @@ -71,6 +71,12 @@ public TransportAddress(InetAddress address, int port) { this(new InetSocketAddress(address, port)); } + /** + * Creates a new {@link TransportAddress} from a {@link InetSocketAddress}. + * @param address the address to wrap + * @throws IllegalArgumentException if the address is null or not resolved + * @see InetSocketAddress#getAddress() + */ public TransportAddress(InetSocketAddress address) { if (address == null) { throw new IllegalArgumentException("InetSocketAddress must not be null"); @@ -82,7 +88,9 @@ public TransportAddress(InetSocketAddress address) { } /** - * Read from a stream. + * Creates a new {@link TransportAddress} from a {@link StreamInput}. + * @param in the stream to read from + * @throws IOException if an I/O error occurs */ public TransportAddress(StreamInput in) throws IOException { final int len = in.readByte(); @@ -116,6 +124,8 @@ public String getAddress() { /** * Returns the addresses port + * @return the port number, or 0 if the socket is not bound yet. + * @see InetSocketAddress#getPort() */ public int getPort() { return address.getPort(); diff --git a/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java b/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java index 68486dd7c975f..49eadbbb2bc00 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java +++ b/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeUnit.java @@ -32,6 +32,7 @@ package org.opensearch.core.common.unit; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -44,9 +45,17 @@ * A {@code SizeUnit} does not maintain size information, but only * helps organize and use size representations that may be maintained * separately across various contexts. + *
<p>
+ * It uses conventional data storage values (base-2):
+ * <ul>
+ *     <li>1KB = 1024 bytes</li>
+ *     <li>1MB = 1024KB</li>
+ *     <li>...</li>
+ * </ul>
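For illustration, the base-2 arithmetic listed above via the enum's conversion helpers (assuming the conventional toBytes/toKB accessors exist on ByteSizeUnit):

    import org.opensearch.core.common.unit.ByteSizeUnit;

    public final class ByteSizeSketch {
        public static void main(String[] args) {
            System.out.println(ByteSizeUnit.KB.toBytes(1)); // 1024
            System.out.println(ByteSizeUnit.MB.toBytes(1)); // 1048576 = 1024 * 1024
            System.out.println(ByteSizeUnit.MB.toKB(1));    // 1024
        }
    }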
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum ByteSizeUnit implements Writeable { BYTES { @Override diff --git a/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeValue.java b/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeValue.java index 529501226f5e3..1ed6d2d204a99 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeValue.java +++ b/libs/core/src/main/java/org/opensearch/core/common/unit/ByteSizeValue.java @@ -33,6 +33,7 @@ package org.opensearch.core.common.unit; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,8 +48,9 @@ /** * A byte size value * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ByteSizeValue implements Writeable, Comparable, ToXContentFragment { public static final ByteSizeValue ZERO = new ByteSizeValue(0, ByteSizeUnit.BYTES); diff --git a/libs/core/src/main/java/org/opensearch/core/common/util/CollectionUtils.java b/libs/core/src/main/java/org/opensearch/core/common/util/CollectionUtils.java index e8dd31fcf1869..5335c98182b64 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/util/CollectionUtils.java +++ b/libs/core/src/main/java/org/opensearch/core/common/util/CollectionUtils.java @@ -73,6 +73,16 @@ public static boolean isEmpty(Object[] array) { /** * Return a rotated view of the given list with the given distance. + *
<ul>
+ * <li>The distance can be negative, in which case the list is rotated to the left.</li>
+ * <li>The distance can be larger than the size of the list, in which case the list is rotated multiple times.</li>
+ * <li>The distance can be zero, in which case the list is not rotated.</li>
+ * <li>The list can be empty, in which case it remains empty.</li>
+ * </ul>
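The rules in the list above, illustrated (a positive distance rotates right, per the @param note that follows):

    import java.util.Arrays;
    import java.util.List;
    import org.opensearch.core.common.util.CollectionUtils;

    public final class RotateSketch {
        public static void main(String[] args) {
            List<Integer> list = Arrays.asList(1, 2, 3, 4, 5);
            System.out.println(CollectionUtils.rotate(list, 1));  // [5, 1, 2, 3, 4]
            System.out.println(CollectionUtils.rotate(list, -1)); // [2, 3, 4, 5, 1]
            System.out.println(CollectionUtils.rotate(list, 6));  // [5, 1, 2, 3, 4] (wraps around)
            System.out.println(CollectionUtils.rotate(list, 0));  // [1, 2, 3, 4, 5] (the list itself)
        }
    }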
+ * @param list the list to rotate + * @param distance the distance to rotate (positive rotates right, negative rotates left) + * @return a rotated view of the given list with the given distance + * @see RotatedList */ public static List rotate(final List list, int distance) { if (list.isEmpty()) { @@ -92,7 +102,13 @@ public static List rotate(final List list, int distance) { } /** - * in place de-duplicates items in a list + * In place de-duplicates items in a list + * Noop if the list is empty or has one item. + * + * @throws NullPointerException if the list is `null` or comparator is `null` + * @param array the list to de-duplicate + * @param comparator the comparator to use to compare items + * @param the type of the items in the list */ public static void sortAndDedup(final List array, Comparator comparator) { // base case: one item @@ -115,6 +131,12 @@ public static void sortAndDedup(final List array, Comparator comparato array.subList(deduped.nextIndex(), array.size()).clear(); } + /** + * Converts a collection of Integers to an array of ints. + * @param ints The collection of Integers to convert + * @return The array of ints + * @throws NullPointerException if ints is null + */ public static int[] toArray(Collection ints) { Objects.requireNonNull(ints); return ints.stream().mapToInt(s -> s).toArray(); @@ -134,6 +156,11 @@ public static void ensureNoSelfReferences(Object value, String messageHint) { } } + /** + * Converts an object to an Iterable, if possible. + * @param value The object to convert + * @return The Iterable, or null if the object cannot be converted + */ @SuppressWarnings("unchecked") private static Iterable convert(Object value) { if (value == null) { @@ -192,6 +219,13 @@ private static class RotatedList extends AbstractList implements RandomAcc private final List in; private final int distance; + /** + * Creates a rotated list + * @param list The list to rotate + * @param distance The distance to rotate to the right + * @throws IllegalArgumentException if the distance is negative or greater than the size of the list; + * or if the list is not a {@link RandomAccess} list + */ RotatedList(List list, int distance) { if (distance < 0 || distance >= list.size()) { throw new IllegalArgumentException(); @@ -218,6 +252,13 @@ public int size() { } } + /** + * Converts an {@link Iterable} to an {@link ArrayList}. + * @param elements The iterable to convert + * @param the type the elements + * @return an {@link ArrayList} + * @throws NullPointerException if elements is null + */ @SuppressWarnings("unchecked") public static ArrayList iterableAsArrayList(Iterable elements) { if (elements == null) { @@ -297,11 +338,11 @@ public static List> eagerPartition(List list, int size) { } /** - * Check if a collection is empty or not. Empty collection mean either it is null or it has no elements in it. If - * collection contains a null element it means it is not empty. + * Checks if a collection is empty or not. Empty collection mean either it is null or it has no elements in it. + * If collection contains a null element it means it is not empty. 
* * @param collection {@link Collection} - * @return boolean + * @return true if collection is null or {@code isEmpty()}, false otherwise * @param Element */ public static boolean isEmpty(final Collection collection) { diff --git a/libs/core/src/main/java/org/opensearch/core/compress/Compressor.java b/libs/core/src/main/java/org/opensearch/core/compress/Compressor.java index 27d5b5dfdfa15..5324ea6151e51 100644 --- a/libs/core/src/main/java/org/opensearch/core/compress/Compressor.java +++ b/libs/core/src/main/java/org/opensearch/core/compress/Compressor.java @@ -43,7 +43,7 @@ /** * Compressor interface used for compressing {@link org.opensearch.core.xcontent.MediaType} and * {@code org.opensearch.repositories.blobstore.BlobStoreRepository} implementations. - * + *
<p>
* This is not to be confused with {@link org.apache.lucene.codecs.compressing.Compressor} which is used * for codec implementations such as {@code org.opensearch.index.codec.customcodecs.Lucene95CustomCodec} * for compressing {@link org.apache.lucene.document.StoredField}s diff --git a/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java b/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java index 9290254c30d8d..af09a7aebba79 100644 --- a/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java +++ b/libs/core/src/main/java/org/opensearch/core/compress/CompressorRegistry.java @@ -23,7 +23,7 @@ /** * A registry that wraps a static Map singleton which holds a mapping of unique String names (typically the * compressor header as a string) to registerd {@link Compressor} implementations. - * + *
<p>
* This enables plugins, modules, extensions to register their own compression implementations through SPI * * @opensearch.experimental @@ -105,7 +105,7 @@ public static Compressor getCompressor(final String name) { /** * Returns the registered compressors as an Immutable collection - * + *
<p>
* note: used for testing */ public static Map registeredCompressors() { diff --git a/libs/core/src/main/java/org/opensearch/core/compress/spi/CompressorProvider.java b/libs/core/src/main/java/org/opensearch/core/compress/spi/CompressorProvider.java index 019e282444d64..9b806618fe0a0 100644 --- a/libs/core/src/main/java/org/opensearch/core/compress/spi/CompressorProvider.java +++ b/libs/core/src/main/java/org/opensearch/core/compress/spi/CompressorProvider.java @@ -18,7 +18,7 @@ /** * Service Provider Interface for plugins, modules, extensions providing custom * compression algorithms - * + *
<p>
* see {@link Compressor} for implementing methods * and {@link org.opensearch.core.compress.CompressorRegistry} for the registration of custom * Compressors diff --git a/libs/core/src/main/java/org/opensearch/core/index/Index.java b/libs/core/src/main/java/org/opensearch/core/index/Index.java index c7b680dd1f753..a927179114188 100644 --- a/libs/core/src/main/java/org/opensearch/core/index/Index.java +++ b/libs/core/src/main/java/org/opensearch/core/index/Index.java @@ -32,6 +32,7 @@ package org.opensearch.core.index; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -47,14 +48,19 @@ /** * A value class representing the basic required properties of an OpenSearch index. + *
<p>
+ * (This class is immutable.) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Index implements Writeable, ToXContentObject { public static final Index[] EMPTY_ARRAY = new Index[0]; private static final String INDEX_UUID_KEY = "index_uuid"; private static final String INDEX_NAME_KEY = "index_name"; + public static final String UNKNOWN_INDEX_NAME = "_unknown_"; + private static final ObjectParser INDEX_PARSER = new ObjectParser<>("index", Builder::new); static { INDEX_PARSER.declareString(Builder::name, new ParseField(INDEX_NAME_KEY)); @@ -64,39 +70,74 @@ public class Index implements Writeable, ToXContentObject { private final String name; private final String uuid; + /** + * Creates a new Index instance with name and unique identifier + * + * @param name the name of the index + * @param uuid the unique identifier of the index + * @throws NullPointerException if either name or uuid are null + */ public Index(String name, String uuid) { this.name = Objects.requireNonNull(name); this.uuid = Objects.requireNonNull(uuid); } /** - * Read from a stream. + * Creates a new Index instance from a {@link StreamInput}. + * Reads the name and unique identifier from the stream. + * + * @param in the stream to read from + * @throws IOException if an error occurs while reading from the stream + * @see #writeTo(StreamOutput) */ public Index(StreamInput in) throws IOException { this.name = in.readString(); this.uuid = in.readString(); } + /** + * Gets the name of the index. + * + * @return the name of the index. + */ public String getName() { return this.name; } + /** + * Gets the unique identifier of the index. + * + * @return the unique identifier of the index. "_na_" if {@link Strings#UNKNOWN_UUID_VALUE}. + */ public String getUUID() { return uuid; } + /** + * Returns either the name and unique identifier of the index + * or only the name if the uuid is {@link Strings#UNKNOWN_UUID_VALUE}. + *
<p>
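The two forms, for illustration (the uuid value is invented; the rationale follows):

    import org.opensearch.core.common.Strings;
    import org.opensearch.core.index.Index;

    public final class IndexToStringSketch {
        public static void main(String[] args) {
            System.out.println(new Index("logs", "SomeRealUuid"));             // [logs/SomeRealUuid]
            System.out.println(new Index("logs", Strings.UNKNOWN_UUID_VALUE)); // [logs]
        }
    }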
+ * If we have a uuid we put it in the toString so it'll show up in logs + * which is useful as more and more things use the uuid rather + * than the name as the lookup key for the index. + * + * @return {@code "[name/uuid]"} or {@code "[name]"} + */ @Override public String toString() { - /* - * If we have a uuid we put it in the toString so it'll show up in logs which is useful as more and more things use the uuid rather - * than the name as the lookup key for the index. - */ if (Strings.UNKNOWN_UUID_VALUE.equals(uuid)) { return "[" + name + "]"; } return "[" + name + "/" + uuid + "]"; } + /** + * Checks if this index is the same as another index by comparing the name and unique identifier. + * If both uuid are {@link Strings#UNKNOWN_UUID_VALUE} then only the name is compared. + * + * @param o the index to compare to + * @return true if the name and unique identifier are the same, false otherwise. + */ @Override public boolean equals(Object o) { if (this == o) { @@ -116,6 +157,10 @@ public int hashCode() { return result; } + /** Writes the name and unique identifier to the {@link StreamOutput} + * + * @param out The stream to write to + */ @Override public void writeTo(final StreamOutput out) throws IOException { out.writeString(name); diff --git a/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java b/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java index f6980be94ca49..c0abad7ed727f 100644 --- a/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java +++ b/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java @@ -32,6 +32,7 @@ package org.opensearch.core.index.shard; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,52 +46,96 @@ /** * Allows for shard level components to be injected with the shard id. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardId implements Comparable, ToXContentFragment, Writeable { private final Index index; private final int shardId; private final int hashCode; + /** + * Constructs a new shard id. + * @param index the index name + * @param shardId the shard id + */ public ShardId(Index index, int shardId) { this.index = index; this.shardId = shardId; this.hashCode = computeHashCode(); } + /** + * Constructs a new shard id with the given index name, index unique identifier, and shard id. + * @param index the index name + * @param indexUUID the index unique identifier + * @param shardId the shard id + */ public ShardId(String index, String indexUUID, int shardId) { this(new Index(index, indexUUID), shardId); } + /** + * Constructs a new shardId from a stream. + * @param in the stream to read from + * @throws IOException if an error occurs while reading from the stream + * @see #writeTo(StreamOutput) + */ public ShardId(StreamInput in) throws IOException { index = new Index(in); shardId = in.readVInt(); hashCode = computeHashCode(); } + /** + * Writes this shard id to a stream. + * @param out the stream to write to + * @throws IOException if an error occurs while writing to the stream + */ @Override public void writeTo(StreamOutput out) throws IOException { index.writeTo(out); out.writeVInt(shardId); } + /** + * Returns the index of this shard id. + * @return the index of this shard id + */ public Index getIndex() { return index; } + /** + * Returns the name of the index of this shard id. 
+ * @return the name of the index of this shard id + */ public String getIndexName() { return index.getName(); } + /** + * Returns the shard number of this shard id. + * @return the shard number of this shard id + * @see #getId() + */ public int id() { return this.shardId; } + /** + * Returns the shard number of this shard id. + * @return the shard number of this shard id + */ public int getId() { return id(); } + /** + * Returns a string representation of this shard id. + * @return "[indexName][shardId]" + */ @Override public String toString() { return "[" + index.getName() + "][" + shardId + "]"; @@ -98,9 +143,13 @@ public String toString() { /** * Parse the string representation of this shardId back to an object. + * <p>

* We lose index uuid information here, but since we use toString in * rest responses, this is the best we can do to reconstruct the object * on the client side. + * + * @param shardIdString the string representation of the shard id + * (Expect a string of format "[indexName][shardId]" (square brackets included)) */ public static ShardId fromString(String shardIdString) { int splitPosition = shardIdString.indexOf("]["); @@ -120,17 +169,30 @@ public boolean equals(Object o) { return shardId == shardId1.shardId && index.equals(shardId1.index); } + /** Returns the hash code of this shard id. + * + * @return the hash code of this shard id + */ @Override public int hashCode() { return hashCode; } + /** Computes the hash code of this shard id. + * + * @return the hash code of this shard id. + */ private int computeHashCode() { int result = index != null ? index.hashCode() : 0; result = 31 * result + shardId; return result; } + /** + * Compares this ShardId with the specified ShardId. + * @param o the ShardId to be compared. + * @return a negative integer, zero, or a positive integer if this ShardId is less than, equal to, or greater than the specified ShardId + */ @Override public int compareTo(ShardId o) { if (o.getId() == shardId) { diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java index ab887acb85a87..3ce8b4953b9d6 100644 --- a/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/AllCircuitBreakerStats.java @@ -47,25 +47,51 @@ */ public class AllCircuitBreakerStats implements Writeable, ToXContentFragment { + /** An array of all the circuit breaker stats */ private final CircuitBreakerStats[] allStats; + /** + * Constructs the instance + * + * @param allStats an array of all the circuit breaker stats + */ public AllCircuitBreakerStats(CircuitBreakerStats[] allStats) { this.allStats = allStats; } + /** + * Constructs the new instance from {@link StreamInput} + * @param in the {@link StreamInput} to read from + * @throws IOException If an error occurs while reading from the StreamInput + * @see #writeTo(StreamOutput) + */ public AllCircuitBreakerStats(StreamInput in) throws IOException { allStats = in.readArray(CircuitBreakerStats::new, CircuitBreakerStats[]::new); } + /** + * Writes this instance into a {@link StreamOutput} + * @param out the {@link StreamOutput} to write to + * @throws IOException if an error occurs while writing to the StreamOutput + */ @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(allStats); } + /** + * Returns inner stats instances for all circuit breakers + * @return inner stats instances for all circuit breakers + */ public CircuitBreakerStats[] getAllStats() { return this.allStats; } + /** + * Returns the stats for a specific circuit breaker + * @param name the name of the circuit breaker + * @return the {@link CircuitBreakerStats} for the circuit breaker, null if the circuit breaker with such name does not exist + */ public CircuitBreakerStats getStats(String name) { for (CircuitBreakerStats stats : allStats) { if (stats.getName().equals(name)) { diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java index 0e53a38908a96..9207d3ea77227 100644 --- 
a/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/CircuitBreakerStats.java @@ -43,18 +43,33 @@ import java.util.Locale; /** - * Class encapsulating stats about the circuit breaker + * Class encapsulating stats about the {@link org.opensearch.core.common.breaker.CircuitBreaker} * * @opensearch.internal */ public class CircuitBreakerStats implements Writeable, ToXContentObject { + /** The name of the circuit breaker */ private final String name; + /** The limit size in bytes of the circuit breaker. Field: "limit_size_in_bytes" */ private final long limit; + /** The estimated size in bytes of the breaker. Field: "estimated_size_in_bytes" */ private final long estimated; + /** The number of times the breaker has been tripped. Field: "tripped" */ private final long trippedCount; + /** The overhead of the breaker. Field: "overhead" */ private final double overhead; + /** + * Constructs a new instance + * + * @param name The name of the circuit breaker + * @param limit The limit size in bytes of the circuit breaker + * @param estimated The estimated size in bytes of the breaker + * @param overhead The overhead of the breaker + * @param trippedCount The number of times the breaker has been tripped + * @see org.opensearch.core.common.breaker.CircuitBreaker + */ public CircuitBreakerStats(String name, long limit, long estimated, double overhead, long trippedCount) { this.name = name; this.limit = limit; @@ -63,6 +78,14 @@ public CircuitBreakerStats(String name, long limit, long estimated, double overh this.overhead = overhead; } + /** + * Constructs a new instance from the {@link StreamInput} + * + * @param in The StreamInput + * @throws IOException if an error occurs while reading from the StreamInput + * @see org.opensearch.core.common.breaker.CircuitBreaker + * @see #writeTo(StreamOutput) + */ public CircuitBreakerStats(StreamInput in) throws IOException { this.limit = in.readLong(); this.estimated = in.readLong(); @@ -71,6 +94,13 @@ public CircuitBreakerStats(StreamInput in) throws IOException { this.name = in.readString(); } + /** + * Writes this instance into a {@link StreamOutput} + * + * @param out The StreamOutput + * @throws IOException if an error occurs while writing to the StreamOutput + * @see #CircuitBreakerStats(StreamInput) + */ @Override public void writeTo(StreamOutput out) throws IOException { out.writeLong(limit); @@ -80,22 +110,42 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); } + /** + * Returns the name of the circuit breaker + * @return The name of the circuit breaker + */ public String getName() { return this.name; } + /** + * Returns the limit size in bytes of the circuit breaker + * @return The limit size in bytes of the circuit breaker + */ public long getLimit() { return this.limit; } + /** + * Returns the estimated size in bytes of the breaker + * @return The estimated size in bytes of the breaker + */ public long getEstimated() { return this.estimated; } + /** + * Returns the number of times the breaker has been tripped + * @return The number of times the breaker has been tripped + */ public long getTrippedCount() { return this.trippedCount; } + /** + * Returns the overhead of the breaker + * @return The overhead of the breaker + */ public double getOverhead() { return this.overhead; } @@ -113,6 +163,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + /** + * Returns a String
representation of this CircuitBreakerStats + * @return "[name,limit=limit/limit_human,estimated=estimated/estimated_human,overhead=overhead,tripped=trippedCount]" + */ @Override public String toString() { return "[" diff --git a/libs/core/src/main/java/org/opensearch/core/indices/breaker/NoneCircuitBreakerService.java b/libs/core/src/main/java/org/opensearch/core/indices/breaker/NoneCircuitBreakerService.java index 4095fd32b6d3c..49c5a393328b9 100644 --- a/libs/core/src/main/java/org/opensearch/core/indices/breaker/NoneCircuitBreakerService.java +++ b/libs/core/src/main/java/org/opensearch/core/indices/breaker/NoneCircuitBreakerService.java @@ -36,8 +36,9 @@ import org.opensearch.core.common.breaker.NoopCircuitBreaker; /** - * Class that returns a breaker that never breaks + * Class that returns a breaker that uses the NoopCircuitBreaker and never breaks * + * @see org.opensearch.core.common.breaker.NoopCircuitBreaker * @opensearch.internal */ public class NoneCircuitBreakerService extends CircuitBreakerService { @@ -48,6 +49,12 @@ public NoneCircuitBreakerService() { super(); } + /** + * Returns a breaker that uses the NoopCircuitBreaker and never breaks + * + * @param name name of the breaker (ignored) + * @return a NoopCircuitBreaker + */ @Override public CircuitBreaker getBreaker(String name) { return breaker; @@ -58,6 +65,12 @@ public AllCircuitBreakerStats stats() { return new AllCircuitBreakerStats(new CircuitBreakerStats[] { stats(CircuitBreaker.FIELDDATA) }); } + /** + * Always returns the same stats: a NoopCircuitBreaker never breaks, and all operations are no-ops. + * + * @param name name of the breaker (ignored) + * @return always "fielddata", limit: -1, estimated: -1, overhead: 0, trippedCount: 0 + */ @Override public CircuitBreakerStats stats(String name) { return new CircuitBreakerStats(CircuitBreaker.FIELDDATA, -1, -1, 0, 0); diff --git a/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java b/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java index ae4f4c65b28d2..8441ce8b1b622 100644 --- a/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java +++ b/libs/core/src/main/java/org/opensearch/core/rest/RestStatus.java @@ -32,6 +32,7 @@ package org.opensearch.core.rest; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -47,6 +48,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum RestStatus { /** * The client SHOULD continue with its request. This interim response is used to inform the client that the @@ -525,6 +527,15 @@ public int getStatus() { return status; } + /** + * Gets the category class of a REST status code.
+ * + * @return Integer representing class category of the concrete rest status code + */ + public int getStatusFamilyCode() { + return status / 100; + } + public static RestStatus readFrom(StreamInput in) throws IOException { return RestStatus.valueOf(in.readString()); } diff --git a/libs/core/src/main/java/org/opensearch/core/tasks/TaskId.java b/libs/core/src/main/java/org/opensearch/core/tasks/TaskId.java index 97b0231613c73..d34d4acf00e6e 100644 --- a/libs/core/src/main/java/org/opensearch/core/tasks/TaskId.java +++ b/libs/core/src/main/java/org/opensearch/core/tasks/TaskId.java @@ -33,6 +33,7 @@ package org.opensearch.core.tasks; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ /** * Task id that consists of node id and id of the task on the node * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class TaskId implements Writeable { public static final TaskId EMPTY_TASK_ID = new TaskId(); diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java index a0e2a54fce91c..79e531a542026 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/AbstractObjectParser.java @@ -108,7 +108,7 @@ public abstract void declareNamedObject( * * Unlike the other version of this method, "ordered" mode (arrays of * objects) is not supported. - * + *

* See NamedObjectHolder in ObjectParserTests for examples of how to invoke * this. * @@ -163,7 +163,7 @@ public abstract void declareNamedObjects( * the order sent but tools that generate json are free to put object * members in an unordered Map, jumbling them. Thus, if you care about order * you can send the object in the second way. - * + *

* See NamedObjectHolder in ObjectParserTests for examples of how to invoke * this. * @@ -366,10 +366,10 @@ public void declareFieldArray( /** * Declares a set of fields that are required for parsing to succeed. Only one of the values * provided per String[] must be matched. - * + *

* E.g. declareRequiredFieldSet("foo", "bar"); means at least one of "foo" or * "bar" fields must be present. If neither of those fields are present, an exception will be thrown. - * + *

* Multiple required sets can be configured: * *


@@ -379,7 +379,7 @@ public  void declareFieldArray(
      *
      * requires that one of "foo" or "bar" fields are present, and also that one of "bizz" or
      * "buzz" fields are present.
-     *
+     * <p>

* In JSON, it means any of these combinations are acceptable: * *

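To make the field-set declarations above concrete, the following is a minimal sketch; the Thing class, its setters, and the field names are illustrative assumptions, not part of this change:

    import org.opensearch.core.ParseField;
    import org.opensearch.core.xcontent.ObjectParser;

    final class ThingParserHolder {
        // "Thing" and its setters are hypothetical stand-ins for a real value class.
        static final ObjectParser<Thing, Void> PARSER = new ObjectParser<>("thing", Thing::new);
        static {
            PARSER.declareString(Thing::setFoo, new ParseField("foo"));
            PARSER.declareString(Thing::setBar, new ParseField("bar"));
            PARSER.declareString(Thing::setBizz, new ParseField("bizz"));
            PARSER.declareString(Thing::setBuzz, new ParseField("buzz"));
            // At least one of "foo"/"bar" and at least one of "bizz"/"buzz" must be sent.
            PARSER.declareRequiredFieldSet("foo", "bar");
            PARSER.declareRequiredFieldSet("bizz", "buzz");
            // At most one of "foo"/"bar" may appear (see the exclusive sets described below).
            PARSER.declareExclusiveFieldSet("foo", "bar");
        }
    }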
@@ -415,12 +415,12 @@ public void declareFieldArray( /** * Declares a set of fields of which at most one must appear for parsing to succeed - * + * <p>

    * E.g. declareExclusiveFieldSet("foo", "bar"); means that only one of 'foo' * or 'bar' must be present, and if both appear then an exception will be thrown. Note * that this does not make 'foo' or 'bar' required - see {@link #declareRequiredFieldSet(String...)} * for required fields. - * + *

    * Multiple exclusive sets may be declared * * @param exclusiveSet a set of field names, at most one of which must appear diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/MapXContentParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/MapXContentParser.java index 254c340f8836f..0a5cda324ddb7 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/MapXContentParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/MapXContentParser.java @@ -277,7 +277,7 @@ public Token currentToken() { /** * field name that the child element needs to inherit. - * + *

    * In most cases this is the same as currentName() except with embedded arrays. In "foo": [[42]] the first START_ARRAY * token will have the name "foo", but the second START_ARRAY will have no name. */ diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/MediaType.java b/libs/core/src/main/java/org/opensearch/core/xcontent/MediaType.java index 8e3c115c7ba58..c58b3e80d98b5 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/MediaType.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/MediaType.java @@ -32,6 +32,7 @@ package org.opensearch.core.xcontent; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.Writeable; import java.io.IOException; @@ -42,7 +43,10 @@ * Abstracts a Media Type and a format parameter. * Media types are used as values on Content-Type and Accept headers * format is an URL parameter, specifies response media type. + * + * @opensearch.api */ +@PublicApi(since = "2.1.0") public interface MediaType extends Writeable { /** * Returns a type part of a MediaType diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/NamedXContentRegistry.java b/libs/core/src/main/java/org/opensearch/core/xcontent/NamedXContentRegistry.java index 10718ba98fe17..9d876825c5196 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/NamedXContentRegistry.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/NamedXContentRegistry.java @@ -33,6 +33,7 @@ package org.opensearch.core.xcontent; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import java.io.IOException; @@ -49,8 +50,9 @@ /** * Main registry for serializable content (e.g., field mappers, aggregations) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class NamedXContentRegistry { /** * The empty {@link NamedXContentRegistry} for use when you are sure that you aren't going to call @@ -64,6 +66,7 @@ public class NamedXContentRegistry { /** * An entry in the {@linkplain NamedXContentRegistry} containing the name of the object and the parser that can parse it. */ + @PublicApi(since = "1.0.0") public static class Entry { /** The class that this entry can read. */ public final Class categoryClass; diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java index dfd1449ef0e0b..976f353100c55 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilder.java @@ -32,6 +32,7 @@ package org.opensearch.core.xcontent; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesReference; import java.io.ByteArrayOutputStream; @@ -61,6 +62,7 @@ /** * A utility to build XContent (ie json). */ +@PublicApi(since = "1.0.0") public final class XContentBuilder implements Closeable, Flushable { /** @@ -724,7 +726,7 @@ public XContentBuilder value(byte[] value, int offset, int length) throws IOExce /** * Writes the binary content of the given byte array as UTF-8 bytes. - * + *

    * Use {@link XContentParser#charBuffer()} to read the value back */ public XContentBuilder utf8Value(byte[] bytes, int offset, int length) throws IOException { diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilderExtension.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilderExtension.java index 0535da1a584be..9b13ebb23be86 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilderExtension.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentBuilderExtension.java @@ -37,7 +37,7 @@ /** * This interface provides a way for non-JDK classes to plug in a way to serialize to xcontent. - * + *

    * It is greatly preferred that you implement {@link ToXContentFragment} * in the class for encoding, however, in some situations you may not own the * class, in which case you can add an implementation here for encoding it. @@ -63,7 +63,7 @@ public interface XContentBuilderExtension { * Used for plugging in a human readable version of a class's encoding. It is assumed that * the human readable equivalent is always behind the {@code toString()} method, so * this transformer returns the raw value to be used. - * + *

    * An example implementation: * *

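As a sketch of the kind of implementation meant here (CustomDate is a hypothetical third-party class; the map shape follows the getXContentWriters() contract):

    import java.util.HashMap;
    import java.util.Map;

    import org.opensearch.core.xcontent.XContentBuilder;

    final class CustomDateWriters {
        static Map<Class<?>, XContentBuilder.Writer> writers() {
            Map<Class<?>, XContentBuilder.Writer> writers = new HashMap<>();
            // Encode CustomDate values through their string form.
            writers.put(CustomDate.class, (builder, value) -> builder.value(value.toString()));
            return writers;
        }
    }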
    @@ -79,7 +79,7 @@ public interface XContentBuilderExtension {
         /**
          * Used for plugging a transformer for a date or time type object into a String (or other
          * encodable object).
    -     *
+     * <p>

    * For example: * *

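For instance, a transformer map for java.time.Instant might be sketched as follows; the Map<Class<?>, Function<Object, Object>> shape mirrors getDateTransformers(), and the choice of Instant is only illustrative:

    import java.time.Instant;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Function;

    final class InstantTransformers {
        static Map<Class<?>, Function<Object, Object>> dateTransformers() {
            Map<Class<?>, Function<Object, Object>> transformers = new HashMap<>();
            // Serialize Instant values as epoch milliseconds; the human readable
            // form stays behind Instant#toString().
            transformers.put(Instant.class, d -> ((Instant) d).toEpochMilli());
            return transformers;
        }
    }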
    diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java
    index a2f16209a5b7f..4bfd47ccfdc94 100644
    --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java
    +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParser.java
    @@ -44,7 +44,7 @@
     
     /**
      * Interface for pull - parsing {@link XContent} see {@code XContentType} for supported types.
    - *
+ * <p>

    * To obtain an instance of this class use the following pattern: * *

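A minimal version of that pattern might look like the sketch below; the package of XContentType and the exact deprecation handler are assumptions, and error handling is elided:

    import org.opensearch.common.xcontent.XContentType;
    import org.opensearch.core.xcontent.DeprecationHandler;
    import org.opensearch.core.xcontent.NamedXContentRegistry;
    import org.opensearch.core.xcontent.XContentParser;

    // Inside a method that may throw IOException:
    try (
        XContentParser parser = XContentType.JSON.xContent()
            .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{\"name\":\"value\"}")
    ) {
        parser.nextToken(); // position on START_OBJECT before pulling fields
    }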
    @@ -202,11 +202,11 @@  Map map(Supplier> mapFactory, CheckedFunction
          * Default implementation simply returns false since only actual
          * implementation class has knowledge of its internal buffering
          * state.
    -     *
+     * <p>

    * This method shouldn't be used to check if the token contains text or not. */ boolean hasTextCharacters(); diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParserUtils.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParserUtils.java index 13e2f6a695d1b..b10be393f9adb 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParserUtils.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentParserUtils.java @@ -142,10 +142,10 @@ public static Object parseFieldsValue(XContentParser parser) throws IOException * This method expects that the current field name is the concatenation of a type, a delimiter and a name * (ex: terms#foo where "terms" refers to the type of a registered {@link NamedXContentRegistry.Entry}, * "#" is the delimiter and "foo" the name of the object to parse). - * + *

    * It also expected that following this field name is either an Object or an array xContent structure and * the cursor points to the start token of this structure. - * + *

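Such a lookup only succeeds when an entry was registered for the type prefix up front; a hypothetical registration sketch (Aggregation and TermsAggregation are made-up types):

    import java.util.List;

    import org.opensearch.core.ParseField;
    import org.opensearch.core.xcontent.NamedXContentRegistry;

    final class AggregationXContent {
        // Lets a field named "terms#foo" resolve its "terms" prefix to a parser.
        static final NamedXContentRegistry REGISTRY = new NamedXContentRegistry(
            List.of(new NamedXContentRegistry.Entry(Aggregation.class, new ParseField("terms"), TermsAggregation::fromXContent))
        );
    }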
    * The method splits the field's name to extract the type and name and then parses the object * using the {@link XContentParser#namedObject(Class, String, Object)} method. * diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentSubParser.java b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentSubParser.java index d1cdda4aeb8be..337cf9f95fe5f 100644 --- a/libs/core/src/main/java/org/opensearch/core/xcontent/XContentSubParser.java +++ b/libs/core/src/main/java/org/opensearch/core/xcontent/XContentSubParser.java @@ -43,7 +43,7 @@ /** * Wrapper for a XContentParser that makes a single object/array look like a complete document. - * + *

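A typical wrapping, sketched under the assumption that the outer parser's cursor already sits on the object's start token:

    import java.io.IOException;

    import org.opensearch.core.xcontent.XContentParser;
    import org.opensearch.core.xcontent.XContentSubParser;

    final class SubParserDemo {
        static void consumeOneObject(XContentParser parser) throws IOException {
            try (XContentSubParser sub = new XContentSubParser(parser)) {
                while (sub.nextToken() != null) {
                    // only tokens of the wrapped object are visible here
                }
            }
        }
    }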
    * The wrapper prevents the parsing logic to consume tokens outside of the wrapped object as well * as skipping to the end of the object in case of a parsing error. The wrapper is intended to be * used for parsing objects that should be ignored if they are malformed. diff --git a/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java b/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java index 9861847c9e1ea..b6dc0ceb1028f 100644 --- a/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java +++ b/libs/dissect/src/main/java/org/opensearch/dissect/DissectParser.java @@ -194,26 +194,25 @@ public DissectParser(String pattern, String appendSeparator) { * @throws DissectException if unable to dissect a pair into it's parts. */ public Map parse(String inputString) { - /** - * - * This implements a naive string matching algorithm. The string is walked left to right, comparing each byte against - * another string's bytes looking for matches. If the bytes match, then a second cursor looks ahead to see if all the bytes - * of the other string matches. If they all match, record it and advances the primary cursor to the match point. If it can not match - * all of the bytes then progress the main cursor. Repeat till the end of the input string. Since the string being searching for - * (the delimiter) is generally small and rare the naive approach is efficient. - * - * In this case the string that is walked is the input string, and the string being searched for is the current delimiter. - * For example for a dissect pattern of {@code %{a},%{b}:%{c}} the delimiters (comma then colon) are searched for in the - * input string. At class construction the list of keys+delimiters are found (dissectPairs), which allows the use of that ordered - * list to know which delimiter to use for the search. The delimiters is progressed once the current delimiter is matched. - * - * There are two special cases that requires additional parsing beyond the standard naive algorithm. Consecutive delimiters should - * results in a empty matches unless the {@code ->} is provided. For example given the dissect pattern of - * {@code %{a},%{b},%{c},%{d}} and input string of {@code foo,,,} the match should be successful with empty values for b,c and d. - * However, if the key modifier {@code ->}, is present it will simply skip over any delimiters just to the right of the key - * without assigning any values. For example {@code %{a->},{%b}} will match the input string of {@code foo,,,,,,bar} with a=foo and - * b=bar. - * + /* + + This implements a naive string matching algorithm. The string is walked left to right, comparing each byte against + another string's bytes looking for matches. If the bytes match, then a second cursor looks ahead to see if all the bytes + of the other string matches. If they all match, record it and advances the primary cursor to the match point. If it can not match + all of the bytes then progress the main cursor. Repeat till the end of the input string. Since the string being searching for + (the delimiter) is generally small and rare the naive approach is efficient. + + In this case the string that is walked is the input string, and the string being searched for is the current delimiter. + For example for a dissect pattern of {@code %{a},%{b}:%{c}} the delimiters (comma then colon) are searched for in the + input string. 
At class construction the list of keys+delimiters are found (dissectPairs), which allows the use of that ordered + list to know which delimiter to use for the search. The delimiters is progressed once the current delimiter is matched. + + There are two special cases that requires additional parsing beyond the standard naive algorithm. Consecutive delimiters should + results in a empty matches unless the {@code ->} is provided. For example given the dissect pattern of + {@code %{a},%{b},%{c},%{d}} and input string of {@code foo,,,} the match should be successful with empty values for b,c and d. + However, if the key modifier {@code ->}, is present it will simply skip over any delimiters just to the right of the key + without assigning any values. For example {@code %{a->},{%b}} will match the input string of {@code foo,,,,,,bar} with a=foo and + b=bar. */ DissectMatch dissectMatch = new DissectMatch(appendSeparator, maxMatches, maxResults, appendCount, referenceCount); Iterator it = matchPairs.iterator(); diff --git a/libs/geo/src/main/java/org/opensearch/geometry/Circle.java b/libs/geo/src/main/java/org/opensearch/geometry/Circle.java index 6f8b0dc6929cc..c05f316b53b9c 100644 --- a/libs/geo/src/main/java/org/opensearch/geometry/Circle.java +++ b/libs/geo/src/main/java/org/opensearch/geometry/Circle.java @@ -39,12 +39,19 @@ * and optional altitude in meters. */ public class Circle implements Geometry { + + /** Empty circle : x=0, y=0, z=NaN radius=-1 */ public static final Circle EMPTY = new Circle(); + /** Latitude of the center of the circle in degrees */ private final double y; + /** Longitude of the center of the circle in degrees */ private final double x; + /** Altitude of the center of the circle in meters (NaN if irrelevant) */ private final double z; + /** Radius of the circle in meters */ private final double radiusMeters; + /** Create an {@link #EMPTY} circle */ private Circle() { y = 0; x = 0; @@ -52,10 +59,23 @@ private Circle() { radiusMeters = -1; } + /** + * Create a circle with no altitude. + * @param x Longitude of the center of the circle in degrees + * @param y Latitude of the center of the circle in degrees + * @param radiusMeters Radius of the circle in meters + */ public Circle(final double x, final double y, final double radiusMeters) { this(x, y, Double.NaN, radiusMeters); } + /** + * Create a circle with altitude. 
+ * @param x Longitude of the center of the circle in degrees + * @param y Latitude of the center of the circle in degrees + * @param z Altitude of the center of the circle in meters + * @param radiusMeters Radius of the circle in meters + */ public Circle(final double x, final double y, final double z, final double radiusMeters) { this.y = y; this.x = x; @@ -66,39 +86,68 @@ public Circle(final double x, final double y, final double z, final double radiu } } + /** + * @return The type of this geometry (always {@link ShapeType#CIRCLE}) + */ @Override public ShapeType type() { return ShapeType.CIRCLE; } + /** + * @return The y (latitude) of the center of the circle in degrees + */ public double getY() { return y; } + /** + * @return The x (longitude) of the center of the circle in degrees + */ public double getX() { return x; } + /** + * @return The radius of the circle in meters + */ public double getRadiusMeters() { return radiusMeters; } + /** + * @return The altitude of the center of the circle in meters (NaN if irrelevant) + */ public double getZ() { return z; } + /** + * @return The latitude (y) of the center of the circle in degrees + */ public double getLat() { return y; } + /** + * @return The longitude (x) of the center of the circle in degrees + */ public double getLon() { return x; } + /** + * @return The altitude (z) of the center of the circle in meters (NaN if irrelevant) + */ public double getAlt() { return z; } + /** + * Compare this circle to another circle. + * @param o The other circle + * @return True if the two circles are equal in all their properties. False if null or different. + */ @Override public boolean equals(Object o) { if (this == o) return true; @@ -111,6 +160,9 @@ public boolean equals(Object o) { return (Double.compare(circle.z, z) == 0); } + /** + * @return The hashcode of this circle. + */ @Override public int hashCode() { int result; @@ -126,11 +178,23 @@ public int hashCode() { return result; } + /** + * Visit this circle with a {@link GeometryVisitor}. + * + * @param visitor The visitor + * @param The return type of the visitor + * @param The exception type of the visitor + * @return The result of the visitor + * @throws E The exception thrown by the visitor + */ @Override public T visit(GeometryVisitor visitor) throws E { return visitor.visit(this); } + /** + * @return True if this circle is empty (radius less than 0) + */ @Override public boolean isEmpty() { return radiusMeters < 0; @@ -141,6 +205,9 @@ public String toString() { return WellKnownText.INSTANCE.toWKT(this); } + /** + * @return True if this circle has an altitude. False if NaN. + */ @Override public boolean hasZ() { return Double.isNaN(z) == false; diff --git a/libs/geo/src/main/java/org/opensearch/geometry/utils/BitUtil.java b/libs/geo/src/main/java/org/opensearch/geometry/utils/BitUtil.java index 664e7e68d96a5..c946cc2473202 100644 --- a/libs/geo/src/main/java/org/opensearch/geometry/utils/BitUtil.java +++ b/libs/geo/src/main/java/org/opensearch/geometry/utils/BitUtil.java @@ -48,8 +48,8 @@ public class BitUtil { // magic numbers for bit interleaving /** * Interleaves the first 32 bits of each long value - * - * Adapted from: http://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN + *

+ * Adapted from: <a href="http://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN">bithacks.html#InterleaveBMN</a> */ public static long interleave(int even, int odd) { long v1 = 0x00000000FFFFFFFFL & even; diff --git a/libs/geo/src/main/java/org/opensearch/geometry/utils/Geohash.java b/libs/geo/src/main/java/org/opensearch/geometry/utils/Geohash.java index 8b3b841e221e5..33c423e136613 100644 --- a/libs/geo/src/main/java/org/opensearch/geometry/utils/Geohash.java +++ b/libs/geo/src/main/java/org/opensearch/geometry/utils/Geohash.java @@ -39,12 +39,12 @@ /** * Utilities for converting to/from the GeoHash standard - * + * <p>

    * The geohash long format is represented as lon/lat (x/y) interleaved with the 4 least significant bits * representing the level (1-12) [xyxy...xyxyllll] - * + *

    * This differs from a morton encoded value which interleaves lat/lon (y/x). - * + *

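As a worked illustration of that bit layout (the bit values are made up and interleavedXY stands for the already-interleaved x/y bits):

    final class GeohashLayoutDemo {
        public static void main(String[] args) {
            long interleavedXY = 0b1011_0010L;            // assumed bits, not a real cell
            long geohashLong = (interleavedXY << 4) | 5L; // [xyxy...xyxyllll]: a level-5 hash
            int level = (int) (geohashLong & 15L);        // the 4 LSBs recover the level
            System.out.println(level);                    // prints 5
        }
    }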
    * NOTE: this will replace {@code org.opensearch.common.geo.GeoHashUtils} */ public class Geohash { diff --git a/libs/grok/src/main/java/org/opensearch/grok/Grok.java b/libs/grok/src/main/java/org/opensearch/grok/Grok.java index cd786b74be039..7aa3347ba4f4b 100644 --- a/libs/grok/src/main/java/org/opensearch/grok/Grok.java +++ b/libs/grok/src/main/java/org/opensearch/grok/Grok.java @@ -151,7 +151,7 @@ private void validatePatternBank() { /** * Checks whether patterns reference each other in a circular manner and, if so, fail with an exception. * Also checks for malformed pattern definitions and fails with an exception. - * + *

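For example (using the %{...} reference syntax described just below), a pattern bank of this shape is circular and must fail validation; the names are made up:

    import java.util.HashMap;
    import java.util.Map;

    final class CircularBankExample {
        static Map<String, String> circularBank() {
            Map<String, String> bank = new HashMap<>();
            bank.put("NAME", "%{GREETING}");       // NAME refers to GREETING...
            bank.put("GREETING", "hello %{NAME}"); // ...which refers back to NAME
            return bank;
        }
    }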
    * In a pattern, anything between %{ and } or : is considered * a reference to another named pattern. This method will navigate to all these named patterns and * check for a circular reference. diff --git a/libs/grok/src/main/java/org/opensearch/grok/MatcherWatchdog.java b/libs/grok/src/main/java/org/opensearch/grok/MatcherWatchdog.java index 5c7eaca2a634a..d5b7566ecc90f 100644 --- a/libs/grok/src/main/java/org/opensearch/grok/MatcherWatchdog.java +++ b/libs/grok/src/main/java/org/opensearch/grok/MatcherWatchdog.java @@ -44,7 +44,7 @@ * Protects against long running operations that happen between the register and unregister invocations. * Threads that invoke {@link #register(Matcher)}, but take too long to invoke the {@link #unregister(Matcher)} method * will be interrupted. - * + *

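The register/unregister contract can be sketched as follows; the watchdog and matcher parameters are assumed inputs, and Option.DEFAULT is Joni's default search option:

    import org.joni.Matcher;
    import org.opensearch.grok.MatcherWatchdog;

    final class GuardedSearch {
        static int search(MatcherWatchdog watchdog, Matcher matcher, byte[] input) {
            watchdog.register(matcher);
            try {
                return matcher.search(0, input.length, org.joni.Option.DEFAULT);
            } finally {
                watchdog.unregister(matcher); // always unregister to stop the interrupt timer
            }
        }
    }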
    * This is needed for Joni's {@link org.joni.Matcher#search(int, int, int)} method, because * it can end up spinning endlessly if the regular expression is too complex. Joni has checks * that for every 30k iterations it checks if the current thread is interrupted and if so diff --git a/libs/nio/src/main/java/org/opensearch/nio/ChannelContext.java b/libs/nio/src/main/java/org/opensearch/nio/ChannelContext.java index 797dfe859fa6c..0e29661978716 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/ChannelContext.java +++ b/libs/nio/src/main/java/org/opensearch/nio/ChannelContext.java @@ -116,10 +116,10 @@ protected void handleException(Exception e) { /** * Schedules a channel to be closed by the selector event loop with which it is registered. - * + *

    * If the channel is open and the state can be transitioned to closed, the close operation will * be scheduled with the event loop. - * + *

    * Depending on the underlying protocol of the channel, a close operation might simply close the socket * channel or may involve reading and writing messages. */ diff --git a/libs/nio/src/main/java/org/opensearch/nio/NioSelector.java b/libs/nio/src/main/java/org/opensearch/nio/NioSelector.java index a38a33182afea..4ed745723515c 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/NioSelector.java +++ b/libs/nio/src/main/java/org/opensearch/nio/NioSelector.java @@ -512,12 +512,12 @@ private void handleQueuedWrites() { * This is a convenience method to be called after some object (normally channels) are enqueued with this * selector. This method will check if the selector is still open. If it is open, normal operation can * proceed. - * + *

    * If the selector is closed, then we attempt to remove the object from the queue. If the removal * succeeds then we throw an {@link IllegalStateException} indicating that normal operation failed. If * the object cannot be removed from the queue, then the object has already been handled by the selector * and operation can proceed normally. - * + *

    * If this method is called from the selector thread, we will not allow the queuing to occur as the * selector thread can manipulate its queues internally even if it is no longer open. * diff --git a/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java b/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java index 12a1e80055823..3df8e42fe4f14 100644 --- a/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java +++ b/libs/nio/src/main/java/org/opensearch/nio/SocketChannelContext.java @@ -59,7 +59,7 @@ * that it is ready to perform certain operations (read, write, etc) the {@link SocketChannelContext} will * be called. This context will need to implement all protocol related logic. Additionally, if any special * close behavior is required, it should be implemented in this context. - * + *

    * The only methods of the context that should ever be called from a non-selector thread are * {@link #closeChannel()} and {@link #sendMessage(Object, BiConsumer)}. */ diff --git a/libs/telemetry/build.gradle b/libs/telemetry/build.gradle index ce94698836b4f..f8499482a6093 100644 --- a/libs/telemetry/build.gradle +++ b/libs/telemetry/build.gradle @@ -10,6 +10,8 @@ */ dependencies { + api project(':libs:opensearch-common') + testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testImplementation "junit:junit:${versions.junit}" testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}" diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/Telemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/Telemetry.java index 65c974a0d0c36..0f973f50fc640 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/Telemetry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/Telemetry.java @@ -8,14 +8,16 @@ package org.opensearch.telemetry; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.telemetry.metrics.MetricsTelemetry; import org.opensearch.telemetry.tracing.TracingTelemetry; /** * Interface defining telemetry * - * @opensearch.internal + * @opensearch.experimental */ +@ExperimentalApi public interface Telemetry { /** diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Counter.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Counter.java new file mode 100644 index 0000000000000..c62288d280e2f --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/Counter.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * Counter adds the value to the existing metric. + * {@opensearch.experimental} + */ +@ExperimentalApi +public interface Counter { + + /** + * add value. + * @param value value to be added. + */ + void add(double value); + + /** + * add value along with the attributes. + * + * @param value value to be added. + * @param tags attributes/dimensions of the metric. + */ + void add(double value, Tags tags); + +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java new file mode 100644 index 0000000000000..d57def9406b17 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistry.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import java.io.IOException; + +/** + * Default implementation for {@link MetricsRegistry} + */ +class DefaultMetricsRegistry implements MetricsRegistry { + private final MetricsTelemetry metricsTelemetry; + + /** + * Constructor + * @param metricsTelemetry metrics telemetry. 
+ */ + public DefaultMetricsRegistry(MetricsTelemetry metricsTelemetry) { + this.metricsTelemetry = metricsTelemetry; + } + + @Override + public Counter createCounter(String name, String description, String unit) { + return metricsTelemetry.createCounter(name, description, unit); + } + + @Override + public Counter createUpDownCounter(String name, String description, String unit) { + return metricsTelemetry.createUpDownCounter(name, description, unit); + } + + @Override + public void close() throws IOException { + metricsTelemetry.close(); + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java new file mode 100644 index 0000000000000..61b3df089928b --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistry.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.io.Closeable; + +/** + * MetricsRegistry helps in creating the metric instruments. + * @opensearch.experimental + */ +@ExperimentalApi +public interface MetricsRegistry extends Closeable { + + /** + * Creates the counter. + * @param name name of the counter. + * @param description any description about the metric. + * @param unit unit of the metric. + * @return counter. + */ + Counter createCounter(String name, String description, String unit); + + /** + * Creates the upDown counter. + * @param name name of the upDown counter. + * @param description any description about the metric. + * @param unit unit of the metric. + * @return counter. + */ + Counter createUpDownCounter(String name, String description, String unit); +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java index fa3b7fd192f1a..fb3dec8152b4f 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java @@ -8,9 +8,14 @@ package org.opensearch.telemetry.metrics; +import org.opensearch.common.annotation.ExperimentalApi; + /** * Interface for metrics telemetry providers + * + * @opensearch.experimental */ -public interface MetricsTelemetry { +@ExperimentalApi +public interface MetricsTelemetry extends MetricsRegistry { } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopCounter.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopCounter.java new file mode 100644 index 0000000000000..c1daf564dd3bc --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopCounter.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics.noop; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * No-op {@link Counter} + * {@opensearch.internal} + */ +@InternalApi +public class NoopCounter implements Counter { + + /** + * No-op Counter instance + */ + public final static NoopCounter INSTANCE = new NoopCounter(); + + private NoopCounter() {} + + @Override + public void add(double value) { + + } + + @Override + public void add(double value, Tags tags) { + + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java new file mode 100644 index 0000000000000..640c6842a8960 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/NoopMetricsRegistry.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics.noop; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.MetricsRegistry; + +import java.io.IOException; + +/** + *No-op {@link MetricsRegistry} + * {@opensearch.internal} + */ +@InternalApi +public class NoopMetricsRegistry implements MetricsRegistry { + + /** + * No-op Meter instance + */ + public final static NoopMetricsRegistry INSTANCE = new NoopMetricsRegistry(); + + private NoopMetricsRegistry() {} + + @Override + public Counter createCounter(String name, String description, String unit) { + return NoopCounter.INSTANCE; + } + + @Override + public Counter createUpDownCounter(String name, String description, String unit) { + return NoopCounter.INSTANCE; + } + + @Override + public void close() throws IOException { + + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/package-info.java new file mode 100644 index 0000000000000..7c7ed08044993 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/noop/package-info.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Contains metrics related classes + * {@opensearch.internal} + */ +@InternalApi +package org.opensearch.telemetry.metrics.noop; + +import org.opensearch.common.annotation.InternalApi; diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/Tags.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/Tags.java new file mode 100644 index 0000000000000..f2a8764f8021d --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/Tags.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics.tags; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Class to create tags for a meter. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class Tags { + private final Map tagsMap; + /** + * Empty value. + */ + public final static Tags EMPTY = new Tags(Collections.emptyMap()); + + /** + * Factory method. + * @return tags. + */ + public static Tags create() { + return new Tags(new HashMap<>()); + } + + /** + * Constructor. + */ + private Tags(Map tagsMap) { + this.tagsMap = tagsMap; + } + + /** + * Add String attribute. + * @param key key + * @param value value + * @return Same instance. + */ + public Tags addTag(String key, String value) { + Objects.requireNonNull(value, "value cannot be null"); + tagsMap.put(key, value); + return this; + } + + /** + * Add long attribute. + * @param key key + * @param value value + * @return Same instance. + */ + public Tags addTag(String key, long value) { + tagsMap.put(key, value); + return this; + }; + + /** + * Add double attribute. + * @param key key + * @param value value + * @return Same instance. + */ + public Tags addTag(String key, double value) { + tagsMap.put(key, value); + return this; + }; + + /** + * Add boolean attribute. + * @param key key + * @param value value + * @return Same instance. + */ + public Tags addTag(String key, boolean value) { + tagsMap.put(key, value); + return this; + }; + + /** + * Returns the attribute map. + * @return tags map + */ + public Map getTagsMap() { + return Collections.unmodifiableMap(tagsMap); + } + +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/package-info.java new file mode 100644 index 0000000000000..70bc9be992b32 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/tags/package-info.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** + * Contains metrics related classes + * @opensearch.experimental + */ +@ExperimentalApi +package org.opensearch.telemetry.metrics.tags; + +import org.opensearch.common.annotation.ExperimentalApi; diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/AbstractSpan.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/AbstractSpan.java index 150a32b14d0f8..6919995e0ef65 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/AbstractSpan.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/AbstractSpan.java @@ -8,11 +8,14 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.InternalApi; + /** * Base span * * @opensearch.internal */ +@InternalApi public abstract class AbstractSpan implements Span { /** diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultScopedSpan.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultScopedSpan.java new file mode 100644 index 0000000000000..dc1a775839adb --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultScopedSpan.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.common.annotation.InternalApi; + +import java.util.Objects; + +/** + * Default implementation of Scope + * + * @opensearch.internal + */ +@InternalApi +final class DefaultScopedSpan implements ScopedSpan { + + private final Span span; + + private final SpanScope spanScope; + + /** + * Creates Scope instance for the given span + * + * @param span underlying span + * @param spanScope span scope. + */ + public DefaultScopedSpan(Span span, SpanScope spanScope) { + this.span = Objects.requireNonNull(span); + this.spanScope = Objects.requireNonNull(spanScope); + } + + @Override + public void addAttribute(String key, String value) { + span.addAttribute(key, value); + } + + @Override + public void addAttribute(String key, long value) { + span.addAttribute(key, value); + } + + @Override + public void addAttribute(String key, double value) { + span.addAttribute(key, value); + } + + @Override + public void addAttribute(String key, boolean value) { + span.addAttribute(key, value); + } + + @Override + public void addEvent(String event) { + span.addEvent(event); + } + + @Override + public void setError(Exception exception) { + span.setError(exception); + } + + /** + * Executes the runnable to end the scope + */ + @Override + public void close() { + span.endSpan(); + spanScope.close(); + } + + /** + * Returns span. 
+ * @return the span associated with this scope + */ + Span getSpan() { + return span; + } + + /** + * Returns {@link SpanScope} + * @return spanScope + */ + SpanScope getSpanScope() { + return spanScope; + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java index 356b72187de74..a5d515443b54d 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java @@ -8,65 +8,73 @@ package org.opensearch.telemetry.tracing; -import java.util.function.Consumer; +import org.opensearch.common.annotation.InternalApi; + +import java.util.Objects; /** - * Default implementation of Scope + * Default implementation for {@link SpanScope} * * @opensearch.internal */ -final class DefaultSpanScope implements SpanScope { - +@InternalApi +class DefaultSpanScope implements SpanScope { private final Span span; - - private final Consumer onCloseConsumer; + private final SpanScope previousSpanScope; + private static final ThreadLocal spanScopeThreadLocal = new ThreadLocal<>(); + private final TracerContextStorage tracerContextStorage; /** - * Creates Scope instance for the given span - * - * @param span underlying span - * @param onCloseConsumer consumer to execute on scope close + * Constructor + * @param span span + * @param previousSpanScope before attached span scope. */ - public DefaultSpanScope(Span span, Consumer onCloseConsumer) { - this.span = span; - this.onCloseConsumer = onCloseConsumer; + private DefaultSpanScope(Span span, SpanScope previousSpanScope, TracerContextStorage tracerContextStorage) { + this.span = Objects.requireNonNull(span); + this.previousSpanScope = previousSpanScope; + this.tracerContextStorage = tracerContextStorage; } - @Override - public void addSpanAttribute(String key, String value) { - span.addAttribute(key, value); + /** + * Creates the SpanScope object. + * @param span span. + * @param tracerContextStorage tracer context storage. 
+ * @return SpanScope spanScope + */ + public static SpanScope create(Span span, TracerContextStorage tracerContextStorage) { + final SpanScope beforeSpanScope = spanScopeThreadLocal.get(); + SpanScope newSpanScope = new DefaultSpanScope(span, beforeSpanScope, tracerContextStorage); + spanScopeThreadLocal.set(newSpanScope); + return newSpanScope; } @Override - public void addSpanAttribute(String key, long value) { - span.addAttribute(key, value); + public void close() { + detach(); + spanScopeThreadLocal.set(previousSpanScope); } @Override - public void addSpanAttribute(String key, double value) { - span.addAttribute(key, value); + public SpanScope attach() { + tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, this.span); + return this; } - @Override - public void addSpanAttribute(String key, boolean value) { - span.addAttribute(key, value); + private void detach() { + if (previousSpanScope != null) { + tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, previousSpanScope.getSpan()); + } else { + tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, null); + } } @Override - public void addSpanEvent(String event) { - span.addEvent(event); + public Span getSpan() { + return span; } - @Override - public void setError(Exception exception) { - span.setError(exception); + static SpanScope getCurrentSpanScope() { + return spanScopeThreadLocal.get(); } - /** - * Executes the runnable to end the scope - */ - @Override - public void close() { - onCloseConsumer.accept(span); - } } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java index 2f3a425f96703..a3bb64ea392a9 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java @@ -8,10 +8,13 @@ package org.opensearch.telemetry.tracing; -import org.opensearch.telemetry.tracing.attributes.Attributes; +import org.opensearch.common.annotation.InternalApi; import java.io.Closeable; import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Optional; /** * @@ -20,8 +23,12 @@ * * @opensearch.internal */ +@InternalApi class DefaultTracer implements Tracer { - static final String THREAD_NAME = "th_name"; + /** + * Current thread name. + */ + static final String THREAD_NAME = "thread.name"; private final TracingTelemetry tracingTelemetry; private final TracerContextStorage tracerContextStorage; @@ -38,26 +45,17 @@ public DefaultTracer(TracingTelemetry tracingTelemetry, TracerContextStorage endSpan(scopeSpan)); + return span; } @Override @@ -69,20 +67,31 @@ private Span getCurrentSpanInternal() { return tracerContextStorage.get(TracerContextStorage.CURRENT_SPAN); } + @Override public SpanContext getCurrentSpan() { final Span currentSpan = tracerContextStorage.get(TracerContextStorage.CURRENT_SPAN); return (currentSpan == null) ? 
null : new SpanContext(currentSpan); } - private void endSpan(Span span) { - if (span != null) { - span.endSpan(); - setCurrentSpanInContext(span.getParentSpan()); - } + @Override + public ScopedSpan startScopedSpan(SpanCreationContext spanCreationContext) { + Span span = startSpan(spanCreationContext); + SpanScope spanScope = withSpanInScope(span); + return new DefaultScopedSpan(span, spanScope); + } + + @Override + public SpanScope withSpanInScope(Span span) { + return DefaultSpanScope.create(span, tracerContextStorage).attach(); + } + + @Override + public boolean isRecording() { + return true; } - private Span createSpan(String spanName, Span parentSpan, Attributes attributes) { - return tracingTelemetry.createSpan(spanName, parentSpan, attributes); + private Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan) { + return tracingTelemetry.createSpan(spanCreationContext, parentSpan); } private void setCurrentSpanInContext(Span span) { @@ -97,4 +106,10 @@ protected void addDefaultAttributes(Span span) { span.addAttribute(THREAD_NAME, Thread.currentThread().getName()); } + @Override + public Span startSpan(SpanCreationContext spanCreationContext, Map> headers) { + Optional propagatedSpan = tracingTelemetry.getContextPropagator().extractFromHeaders(headers); + return startSpan(spanCreationContext.parent(propagatedSpan.map(SpanContext::new).orElse(null))); + } + } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/ScopedSpan.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/ScopedSpan.java new file mode 100644 index 0000000000000..b320bc415de29 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/ScopedSpan.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.telemetry.tracing.noop.NoopScopedSpan; + +/** + * An auto-closeable that represents scoped span. + * It provides interface for all the span operations. + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface ScopedSpan extends AutoCloseable { + /** + * No-op Scope implementation + */ + ScopedSpan NO_OP = new NoopScopedSpan(); + + /** + * Adds string attribute to the {@link Span}. + * + * @param key attribute key + * @param value attribute value + */ + void addAttribute(String key, String value); + + /** + * Adds long attribute to the {@link Span}. + * + * @param key attribute key + * @param value attribute value + */ + void addAttribute(String key, long value); + + /** + * Adds double attribute to the {@link Span}. + * + * @param key attribute key + * @param value attribute value + */ + void addAttribute(String key, double value); + + /** + * Adds boolean attribute to the {@link Span}. + * + * @param key attribute key + * @param value attribute value + */ + void addAttribute(String key, boolean value); + + /** + * Adds an event to the {@link Span}. 
+ * + * @param event event name + */ + void addEvent(String event); + + /** + * Records error in the span + * + * @param exception exception to be recorded + */ + void setError(Exception exception); + + /** + * closes the scope + */ + @Override + void close(); +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java index 6cb1c8234f3de..00b64492c281e 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java @@ -8,13 +8,16 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.ExperimentalApi; + /** * An interface that represents a tracing span. * Spans are created by the Tracer.startSpan method. * Span must be ended by calling SpanScope.close which internally calls Span's endSpan. * - * @opensearch.internal + * @opensearch.experimental */ +@ExperimentalApi public interface Span { /** diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java index b849869afdc03..f9af611553aff 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanContext.java @@ -8,10 +8,15 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.ExperimentalApi; + /** * Wrapped Span will be exposed to the code outside of tracing package for sharing the {@link Span} without having access to * its properties. + * + * @opensearch.experimental */ +@ExperimentalApi public final class SpanContext { private final Span span; diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java new file mode 100644 index 0000000000000..cbbcfe7a85d57 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanCreationContext.java @@ -0,0 +1,122 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.telemetry.tracing.attributes.Attributes; + +/** + * Context for span details. + * + * @opensearch.experimental + */ +@ExperimentalApi +public final class SpanCreationContext { + private String spanName; + private Attributes attributes; + private SpanKind spanKind = SpanKind.INTERNAL; + private SpanContext parent; + + /** + * Constructor. + */ + private SpanCreationContext() {} + + /** + * Sets the span type to server. + * @return spanCreationContext + */ + public static SpanCreationContext server() { + SpanCreationContext spanCreationContext = new SpanCreationContext(); + spanCreationContext.spanKind = SpanKind.SERVER; + return spanCreationContext; + } + + /** + * Sets the span type to client. + * @return spanCreationContext + */ + public static SpanCreationContext client() { + SpanCreationContext spanCreationContext = new SpanCreationContext(); + spanCreationContext.spanKind = SpanKind.CLIENT; + return spanCreationContext; + } + + /** + * Sets the span type to internal. 
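
For illustration, the factory methods above combine with the fluent setters that follow; a small sketch (the span name and attribute values are hypothetical):

    SpanCreationContext context = SpanCreationContext.server()            // SERVER span kind
        .name("http_request")
        .attributes(Attributes.create().addAttribute("route", "/_search"));
    // The kind defaults to INTERNAL; server() and client() override it at creation time.
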
+     * @return spanCreationContext
+     */
+    public static SpanCreationContext internal() {
+        SpanCreationContext spanCreationContext = new SpanCreationContext();
+        spanCreationContext.spanKind = SpanKind.INTERNAL;
+        return spanCreationContext;
+    }
+
+    /**
+     * Sets the span name.
+     * @param spanName span name.
+     * @return spanCreationContext
+     */
+    public SpanCreationContext name(String spanName) {
+        this.spanName = spanName;
+        return this;
+    }
+
+    /**
+     * Sets the span attributes.
+     * @param attributes attributes.
+     * @return spanCreationContext
+     */
+    public SpanCreationContext attributes(Attributes attributes) {
+        this.attributes = attributes;
+        return this;
+    }
+
+    /**
+     * Sets the parent for the span.
+     * @param parent parent
+     * @return spanCreationContext
+     */
+    public SpanCreationContext parent(SpanContext parent) {
+        this.parent = parent;
+        return this;
+    }
+
+    /**
+     * Returns the span name.
+     * @return span name
+     */
+    public String getSpanName() {
+        return spanName;
+    }
+
+    /**
+     * Returns the span attributes.
+     * @return attributes.
+     */
+    public Attributes getAttributes() {
+        return attributes;
+    }
+
+    /**
+     * Returns the span kind.
+     * @return the span kind.
+     */
+    public SpanKind getSpanKind() {
+        return spanKind;
+    }
+
+    /**
+     * Returns the parent span.
+     * @return parent.
+     */
+    public SpanContext getParent() {
+        return parent;
+    }
+}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanKind.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanKind.java
new file mode 100644
index 0000000000000..d674bb2c866f2
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanKind.java
@@ -0,0 +1,31 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import org.opensearch.common.annotation.PublicApi;
+
+/**
+ * Type of Span.
+ */
+@PublicApi(since = "2.11.0")
+public enum SpanKind {
+    /**
+     * Span represents the client side code.
+     */
+    CLIENT,
+    /**
+     * Span represents the server side code.
+     */
+    SERVER,
+
+    /**
+     * Span represents the internal calls. This is the default value of a span type.
+ */ + INTERNAL; +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanReference.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanReference.java index 180136ecf7a57..945682c3df390 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanReference.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanReference.java @@ -8,11 +8,14 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.InternalApi; + /** * Wrapper class to hold reference of Span * * @opensearch.internal */ +@InternalApi final class SpanReference { private Span span; diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanScope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanScope.java index cf67165d889bc..8bccd5774a340 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanScope.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanScope.java @@ -8,67 +8,34 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.telemetry.tracing.noop.NoopSpanScope; /** * An auto-closeable that represents scope of the span. - * It provides interface for all the span operations. + * + * @opensearch.experimental */ +@ExperimentalApi public interface SpanScope extends AutoCloseable { + /** * No-op Scope implementation */ SpanScope NO_OP = new NoopSpanScope(); - /** - * Adds string attribute to the {@link Span}. - * - * @param key attribute key - * @param value attribute value - */ - void addSpanAttribute(String key, String value); - - /** - * Adds long attribute to the {@link Span}. - * - * @param key attribute key - * @param value attribute value - */ - void addSpanAttribute(String key, long value); - - /** - * Adds double attribute to the {@link Span}. - * - * @param key attribute key - * @param value attribute value - */ - void addSpanAttribute(String key, double value); - - /** - * Adds boolean attribute to the {@link Span}. - * - * @param key attribute key - * @param value attribute value - */ - void addSpanAttribute(String key, boolean value); - - /** - * Adds an event to the {@link Span}. - * - * @param event event name - */ - void addSpanEvent(String event); + @Override + void close(); /** - * Records error in the span - * - * @param exception exception to be recorded + * Attaches span to the {@link SpanScope} + * @return spanScope */ - void setError(Exception exception); + SpanScope attach(); /** - * closes the scope + * Returns span attached with the {@link SpanScope} + * @return span. */ - @Override - void close(); + Span getSpan(); } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java index bc55b26abc761..8257d251e9560 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java @@ -8,47 +8,55 @@ package org.opensearch.telemetry.tracing; -import org.opensearch.telemetry.tracing.attributes.Attributes; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.telemetry.tracing.http.HttpTracer; import java.io.Closeable; /** * Tracer is the interface used to create a {@link Span} * It automatically handles the context propagation between threads, tasks, nodes etc. - * + *
* <p>
 * All methods on the Tracer object are multi-thread safe.
+ *
+ * @opensearch.experimental
  */
-public interface Tracer extends Closeable {
-
+@ExperimentalApi
+public interface Tracer extends HttpTracer, Closeable {
     /**
-     * Starts the {@link Span} with given name
+     * Starts the {@link Span} with given {@link SpanCreationContext}
      *
-     * @param spanName span name
-     * @return scope of the span, must be closed with explicit close or with try-with-resource
+     * @param context span context
+     * @return span, must be closed.
      */
-    SpanScope startSpan(String spanName);
+    Span startSpan(SpanCreationContext context);
 
     /**
-     * Starts the {@link Span} with given name and attributes. This is required in cases when some attribute based
-     * decision needs to be made before starting the span. Very useful in the case of Sampling.
-     * @param spanName span name.
-     * @param attributes attributes to be added.
-     * @return scope of the span, must be closed with explicit close or with try-with-resource
+     * Returns the current span.
+     * @return current wrapped span.
      */
-    SpanScope startSpan(String spanName, Attributes attributes);
+    SpanContext getCurrentSpan();
 
     /**
-     * Starts the {@link Span} with the given name, parent and attributes.
-     * @param spanName span name.
-     * @param parentSpan parent span.
-     * @param attributes attributes to be added.
+     * Starts the span and scopes it. This must be used for scenarios where the {@link SpanScope} and {@link Span} lifecycles
+     * are the same and end within the same thread where they were created.
+     * @param spanCreationContext span creation context
      * @return scope of the span, must be closed with explicit close or with try-with-resource
      */
-    SpanScope startSpan(String spanName, SpanContext parentSpan, Attributes attributes);
+    ScopedSpan startScopedSpan(SpanCreationContext spanCreationContext);
 
     /**
-     * Returns the current span.
-     * @return current wrapped span.
+     * Creates the {@link SpanScope} for the current thread. It's mandatory to scope the span just after creation so that it will
+     * automatically manage the attach/detach to the current thread.
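
For illustration, a sketch of the two span lifecycles this interface distinguishes, assuming `tracer` is a configured instance and the span names are hypothetical:

    // Same-thread lifecycle: startScopedSpan creates, scopes, and (on close) ends the span.
    try (ScopedSpan scoped = tracer.startScopedSpan(SpanCreationContext.internal().name("local-work"))) {
        // traced work
    }

    // Decoupled lifecycle: the span may outlive any single scope, e.g. across async callbacks.
    Span span = tracer.startSpan(SpanCreationContext.internal().name("async-work"));
    try (SpanScope scope = tracer.withSpanInScope(span)) {
        // work on this thread; another thread may re-scope the same span later
    }
    span.endSpan(); // the caller is responsible for ending the span explicitly
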
+     * @param span span to be scoped
+     * @return SpanScope
      */
-    SpanContext getCurrentSpan();
+    SpanScope withSpanInScope(Span span);
+
+    /**
+     * Tells if the traces are being recorded or not
+     * @return boolean
+     */
+    boolean isRecording();
+
 }
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java
index d85b404b0ce41..958d054948483 100644
--- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java
@@ -8,6 +8,8 @@
 package org.opensearch.telemetry.tracing;
 
+import org.opensearch.common.annotation.InternalApi;
+
 /**
  * Storage interface used for storing tracing context
  * @param <K> key type
  * @param <V> value type
  *
  * @opensearch.internal
  */
+@InternalApi
 public interface TracerContextStorage<K, V> {
     /**
      * Key for storing current span
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java
index 3e4a377d33a3d..5fbc5d329e227 100644
--- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java
@@ -8,14 +8,19 @@
 package org.opensearch.telemetry.tracing;
 
+import org.opensearch.common.annotation.ExperimentalApi;
+
+import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.function.BiConsumer;
 
 /**
  * Interface defining the tracing related context propagation
  *
- * @opensearch.internal
+ * @opensearch.experimental
  */
+@ExperimentalApi
 public interface TracingContextPropagator {
 
     /**
@@ -23,7 +28,15 @@ public interface TracingContextPropagator {
      * @param props properties
      * @return current span
      */
-    Span extract(Map<String, String> props);
+    Optional<Span> extract(Map<String, String> props);
+
+    /**
+     * Extracts current span from HTTP headers.
+     *
+     * @param headers request headers to extract the context from
+     * @return current span
+     */
+    Optional<Span> extractFromHeaders(Map<String, List<String>> headers);
 
     /**
      * Injects tracing context
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java
index 2e91cadbf395f..f04a505088424 100644
--- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java
@@ -8,25 +8,26 @@
 package org.opensearch.telemetry.tracing;
 
-import org.opensearch.telemetry.tracing.attributes.Attributes;
+import org.opensearch.common.annotation.ExperimentalApi;
 
 import java.io.Closeable;
 
 /**
  * Interface for tracing telemetry providers
  *
- * @opensearch.internal
+ * @opensearch.experimental
  */
+@ExperimentalApi
 public interface TracingTelemetry extends Closeable {
 
     /**
      * Creates span with provided arguments
-     * @param spanName name of the span
-     * @param parentSpan span's parent span
-     * @param attributes attributes to be added.
+     *
+     * @param spanCreationContext span creation context.
+     * @param parentSpan parent span.
      * @return span instance
      */
-    Span createSpan(String spanName, Span parentSpan, Attributes attributes);
+    Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan);
 
     /**
      * provides tracing context propagator
@@ -34,9 +35,4 @@ public interface TracingTelemetry extends Closeable {
      */
     TracingContextPropagator getContextPropagator();
 
-    /**
-     * closes the resource
-     */
-    void close();
-
 }
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/attributes/Attributes.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/attributes/Attributes.java
index fc002224c08dd..6dcc9c5468b38 100644
--- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/attributes/Attributes.java
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/attributes/Attributes.java
@@ -8,6 +8,8 @@
 package org.opensearch.telemetry.tracing.attributes;
 
+import org.opensearch.common.annotation.ExperimentalApi;
+
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
@@ -15,7 +17,10 @@
 /**
  * Class to create attributes for a span.
+ *
+ * @opensearch.experimental
  */
+@ExperimentalApi
 public class Attributes {
     private final Map<String, Object> attributesMap;
     /**
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/attributes/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/attributes/package-info.java
index 91d1c3291a4a5..ccd56786f63ef 100644
--- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/attributes/package-info.java
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/attributes/package-info.java
@@ -7,6 +7,6 @@
  */
 
 /**
- * Contains No-op implementations
+ * Contains attributes management
 */
 package org.opensearch.telemetry.tracing.attributes;
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java
new file mode 100644
index 0000000000000..50d18c0a0d040
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/HttpTracer.java
@@ -0,0 +1,36 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing.http;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.telemetry.tracing.Span;
+import org.opensearch.telemetry.tracing.SpanCreationContext;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * HttpTracer helps in creating a {@link Span} which reads the incoming tracing information
+ * from the HttpRequest header and propagates the span accordingly.
+ * <p>
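
For illustration, a sketch of the server-side flow this interface enables, assuming `tracer` implements it and the header map comes from an inbound HTTP request (the accessor and names are hypothetical):

    Map<String, List<String>> headers = request.getHeaders(); // hypothetical accessor on an inbound request
    Span span = tracer.startSpan(SpanCreationContext.server().name("dispatch-request"), headers);
    try (SpanScope scope = tracer.withSpanInScope(span)) {
        // handle the request under the propagated trace context
    } finally {
        span.endSpan();
    }
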
+ * All methods on the Tracer object are multi-thread safe.
+ *
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+public interface HttpTracer {
+    /**
+     * Starts the span, propagating the tracing info from the HttpRequest header.
+     *
+     * @param spanCreationContext span creation context.
+     * @param header http request header.
+     * @return span.
+     */
+    Span startSpan(SpanCreationContext spanCreationContext, Map<String, List<String>> header);
+}
diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/package-info.java
similarity index 63%
rename from server/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java
rename to libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/package-info.java
index e996873963b1b..9feb862a4e010 100644
--- a/server/src/main/java/org/opensearch/index/codec/customcodecs/package-info.java
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/http/package-info.java
@@ -7,6 +7,6 @@
  */
 
 /**
- * A plugin that implements compression codecs with native implementation.
+ * Contains HTTP tracer related classes
 */
-package org.opensearch.index.codec.customcodecs;
+package org.opensearch.telemetry.tracing.http;
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopScopedSpan.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopScopedSpan.java
new file mode 100644
index 0000000000000..fc296d3689645
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopScopedSpan.java
@@ -0,0 +1,61 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing.noop;
+
+import org.opensearch.common.annotation.InternalApi;
+import org.opensearch.telemetry.tracing.ScopedSpan;
+
+/**
+ * No-op implementation of ScopedSpan
+ *
+ * @opensearch.internal
+ */
+@InternalApi
+public final class NoopScopedSpan implements ScopedSpan {
+
+    /**
+     * No-args constructor
+     */
+    public NoopScopedSpan() {}
+
+    @Override
+    public void addAttribute(String key, String value) {
+
+    }
+
+    @Override
+    public void addAttribute(String key, long value) {
+
+    }
+
+    @Override
+    public void addAttribute(String key, double value) {
+
+    }
+
+    @Override
+    public void addAttribute(String key, boolean value) {
+
+    }
+
+    @Override
+    public void addEvent(String event) {
+
+    }
+
+    @Override
+    public void setError(Exception exception) {
+
+    }
+
+    @Override
+    public void close() {
+
+    }
+}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpan.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpan.java
new file mode 100644
index 0000000000000..f41e11017d155
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpan.java
@@ -0,0 +1,85 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.telemetry.tracing.noop; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.tracing.Span; + +/** + * No-op implementation of {@link org.opensearch.telemetry.tracing.Span} + * + * @opensearch.internal + */ +@InternalApi +public class NoopSpan implements Span { + + /** + * No-op Span instance + */ + public final static NoopSpan INSTANCE = new NoopSpan(); + + private NoopSpan() { + + } + + @Override + public void endSpan() { + + } + + @Override + public Span getParentSpan() { + return null; + } + + @Override + public String getSpanName() { + return "noop-span"; + } + + @Override + public void addAttribute(String key, String value) { + + } + + @Override + public void addAttribute(String key, Long value) { + + } + + @Override + public void addAttribute(String key, Double value) { + + } + + @Override + public void addAttribute(String key, Boolean value) { + + } + + @Override + public void setError(Exception exception) { + + } + + @Override + public void addEvent(String event) { + + } + + @Override + public String getTraceId() { + return "noop-trace-id"; + } + + @Override + public String getSpanId() { + return "noop-span-id"; + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpanScope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpanScope.java index a1d16d1d80d00..bb04a67657d6e 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpanScope.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpanScope.java @@ -8,52 +8,36 @@ package org.opensearch.telemetry.tracing.noop; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.tracing.Span; import org.opensearch.telemetry.tracing.SpanScope; /** - * No-op implementation of SpanScope + * No-op implementation of {@link SpanScope} * * @opensearch.internal */ -public final class NoopSpanScope implements SpanScope { - +@InternalApi +public class NoopSpanScope implements SpanScope { /** - * No-args constructor + * Constructor. 
*/ - public NoopSpanScope() {} - - @Override - public void addSpanAttribute(String key, String value) { - - } - - @Override - public void addSpanAttribute(String key, long value) { + public NoopSpanScope() { } @Override - public void addSpanAttribute(String key, double value) { - - } - - @Override - public void addSpanAttribute(String key, boolean value) { - - } - - @Override - public void addSpanEvent(String event) { + public void close() { } @Override - public void setError(Exception exception) { - + public SpanScope attach() { + return this; } @Override - public void close() { - + public Span getSpan() { + return NoopSpan.INSTANCE; } } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java index 1a37ed0d0f245..50452ff5fe3b4 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java @@ -8,16 +8,23 @@ package org.opensearch.telemetry.tracing.noop; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.tracing.ScopedSpan; +import org.opensearch.telemetry.tracing.Span; import org.opensearch.telemetry.tracing.SpanContext; +import org.opensearch.telemetry.tracing.SpanCreationContext; import org.opensearch.telemetry.tracing.SpanScope; import org.opensearch.telemetry.tracing.Tracer; -import org.opensearch.telemetry.tracing.attributes.Attributes; + +import java.util.List; +import java.util.Map; /** * No-op implementation of Tracer * * @opensearch.internal */ +@InternalApi public class NoopTracer implements Tracer { /** @@ -28,27 +35,37 @@ public class NoopTracer implements Tracer { private NoopTracer() {} @Override - public SpanScope startSpan(String spanName) { - return SpanScope.NO_OP; + public Span startSpan(SpanCreationContext context) { + return NoopSpan.INSTANCE; } @Override - public SpanScope startSpan(String spanName, Attributes attributes) { - return SpanScope.NO_OP; + public SpanContext getCurrentSpan() { + return new SpanContext(NoopSpan.INSTANCE); } @Override - public SpanScope startSpan(String spanName, SpanContext parentSpan, Attributes attributes) { + public ScopedSpan startScopedSpan(SpanCreationContext spanCreationContext) { + return ScopedSpan.NO_OP; + } + + @Override + public SpanScope withSpanInScope(Span span) { return SpanScope.NO_OP; } @Override - public SpanContext getCurrentSpan() { - return null; + public boolean isRecording() { + return false; } @Override public void close() { } + + @Override + public Span startSpan(SpanCreationContext spanCreationContext, Map> header) { + return NoopSpan.INSTANCE; + } } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/runnable/TraceableRunnable.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/runnable/TraceableRunnable.java index 54a5a7f1678e6..8a61dd70d6d54 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/runnable/TraceableRunnable.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/runnable/TraceableRunnable.java @@ -8,40 +8,33 @@ package org.opensearch.telemetry.tracing.runnable; -import org.opensearch.telemetry.tracing.SpanContext; -import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.ScopedSpan; +import org.opensearch.telemetry.tracing.SpanCreationContext; import org.opensearch.telemetry.tracing.Tracer; -import 
org.opensearch.telemetry.tracing.attributes.Attributes; /** * Wraps the runnable and add instrumentation to trace the {@link Runnable} */ public class TraceableRunnable implements Runnable { private final Runnable runnable; - private final SpanContext parent; + private final SpanCreationContext spanCreationContext; private final Tracer tracer; - private final String spanName; - private final Attributes attributes; /** * Constructor. * @param tracer tracer - * @param spanName spanName - * @param parent parent Span. - * @param attributes attributes. + * @param spanCreationContext spanCreationContext * @param runnable runnable. */ - public TraceableRunnable(Tracer tracer, String spanName, SpanContext parent, Attributes attributes, Runnable runnable) { + public TraceableRunnable(Tracer tracer, SpanCreationContext spanCreationContext, Runnable runnable) { this.tracer = tracer; - this.spanName = spanName; - this.parent = parent; - this.attributes = attributes; + this.spanCreationContext = spanCreationContext; this.runnable = runnable; } @Override public void run() { - try (SpanScope spanScope = tracer.startSpan(spanName, parent, attributes)) { + try (ScopedSpan spanScope = tracer.startScopedSpan(spanCreationContext)) { runnable.run(); } } diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java new file mode 100644 index 0000000000000..6171641db5f07 --- /dev/null +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/metrics/DefaultMetricsRegistryTests.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.test.OpenSearchTestCase; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class DefaultMetricsRegistryTests extends OpenSearchTestCase { + + private MetricsTelemetry metricsTelemetry; + private DefaultMetricsRegistry defaultMeterRegistry; + + @Override + public void setUp() throws Exception { + super.setUp(); + metricsTelemetry = mock(MetricsTelemetry.class); + defaultMeterRegistry = new DefaultMetricsRegistry(metricsTelemetry); + } + + public void testCounter() { + Counter mockCounter = mock(Counter.class); + when(defaultMeterRegistry.createCounter(any(String.class), any(String.class), any(String.class))).thenReturn(mockCounter); + Counter counter = defaultMeterRegistry.createCounter( + "org.opensearch.telemetry.metrics.DefaultMeterRegistryTests.testCounter", + "test counter", + "1" + ); + assertSame(mockCounter, counter); + } + + public void testUpDownCounter() { + Counter mockCounter = mock(Counter.class); + when(defaultMeterRegistry.createUpDownCounter(any(String.class), any(String.class), any(String.class))).thenReturn(mockCounter); + Counter counter = defaultMeterRegistry.createUpDownCounter( + "org.opensearch.telemetry.metrics.DefaultMeterRegistryTests.testUpDownCounter", + "test up-down counter", + "1" + ); + assertSame(mockCounter, counter); + } + +} diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultSpanScopeTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultScopedSpanTests.java similarity index 50% rename from libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultSpanScopeTests.java rename to libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultScopedSpanTests.java index eea6b77ce6e1e..1d4871fe1419e 100644 --- a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultSpanScopeTests.java +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultScopedSpanTests.java @@ -10,66 +10,71 @@ import org.opensearch.test.OpenSearchTestCase; -import java.util.function.Consumer; - import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; -public class DefaultSpanScopeTests extends OpenSearchTestCase { +public class DefaultScopedSpanTests extends OpenSearchTestCase { @SuppressWarnings("unchecked") public void testClose() { Span mockSpan = mock(Span.class); - Consumer mockConsumer = mock(Consumer.class); - DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, mockConsumer); + SpanScope mockSpanScope = mock(SpanScope.class); + DefaultScopedSpan defaultSpanScope = new DefaultScopedSpan(mockSpan, mockSpanScope); defaultSpanScope.close(); - verify(mockConsumer).accept(mockSpan); + verify(mockSpan).endSpan(); + verify(mockSpanScope).close(); } public void testAddSpanAttributeString() { Span mockSpan = mock(Span.class); - DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); - defaultSpanScope.addSpanAttribute("key", "value"); + SpanScope mockSpanScope = mock(SpanScope.class); + DefaultScopedSpan defaultSpanScope = new DefaultScopedSpan(mockSpan, mockSpanScope); + defaultSpanScope.addAttribute("key", "value"); verify(mockSpan).addAttribute("key", "value"); } public void testAddSpanAttributeLong() { Span mockSpan = mock(Span.class); - DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); - defaultSpanScope.addSpanAttribute("key", 1L); + 
SpanScope mockSpanScope = mock(SpanScope.class); + DefaultScopedSpan defaultSpanScope = new DefaultScopedSpan(mockSpan, mockSpanScope); + defaultSpanScope.addAttribute("key", 1L); verify(mockSpan).addAttribute("key", 1L); } public void testAddSpanAttributeDouble() { Span mockSpan = mock(Span.class); - DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); - defaultSpanScope.addSpanAttribute("key", 1.0); + SpanScope mockSpanScope = mock(SpanScope.class); + DefaultScopedSpan defaultSpanScope = new DefaultScopedSpan(mockSpan, mockSpanScope); + defaultSpanScope.addAttribute("key", 1.0); verify(mockSpan).addAttribute("key", 1.0); } public void testAddSpanAttributeBoolean() { Span mockSpan = mock(Span.class); - DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); - defaultSpanScope.addSpanAttribute("key", true); + SpanScope mockSpanScope = mock(SpanScope.class); + DefaultScopedSpan defaultSpanScope = new DefaultScopedSpan(mockSpan, mockSpanScope); + defaultSpanScope.addAttribute("key", true); verify(mockSpan).addAttribute("key", true); } public void testAddEvent() { Span mockSpan = mock(Span.class); - DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); - defaultSpanScope.addSpanEvent("eventName"); + SpanScope mockSpanScope = mock(SpanScope.class); + DefaultScopedSpan defaultSpanScope = new DefaultScopedSpan(mockSpan, mockSpanScope); + defaultSpanScope.addEvent("eventName"); verify(mockSpan).addEvent("eventName"); } public void testSetError() { Span mockSpan = mock(Span.class); - DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); + SpanScope mockSpanScope = mock(SpanScope.class); + DefaultScopedSpan defaultSpanScope = new DefaultScopedSpan(mockSpan, mockSpanScope); Exception ex = new Exception("error"); defaultSpanScope.setError(ex); diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java index 07abd43c8dd7b..2a791f1ae4164 100644 --- a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java @@ -10,15 +10,18 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.node.Node; import org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.telemetry.tracing.MockSpan; import org.opensearch.test.telemetry.tracing.MockTracingTelemetry; -import org.junit.Assert; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; -import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -32,59 +35,79 @@ public class DefaultTracerTests extends OpenSearchTestCase { private Span mockSpan; private Span mockParentSpan; + private SpanScope mockSpanScope; + private ThreadPool threadPool; + private ExecutorService executorService; + private SpanCreationContext spanCreationContext; + @Override public void setUp() throws Exception { super.setUp(); + threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "default tracer tests").build()); + 
executorService = threadPool.executor(ThreadPool.Names.GENERIC); setupMocks(); } @Override public void tearDown() throws Exception { super.tearDown(); + executorService.shutdown(); + threadPool.shutdownNow(); } public void testCreateSpan() { DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); - defaultTracer.startSpan("span_name"); + defaultTracer.startSpan(spanCreationContext); - Assert.assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + String spanName = defaultTracer.getCurrentSpan().getSpan().getSpanName(); + assertEquals("span_name", spanName); + assertTrue(defaultTracer.isRecording()); } + @SuppressWarnings("unchecked") public void testCreateSpanWithAttributesWithMock() { DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); Attributes attributes = Attributes.create().addAttribute("name", "value"); - when(mockTracingTelemetry.createSpan("span_name", mockParentSpan, attributes)).thenReturn(mockSpan); - defaultTracer.startSpan("span_name", attributes); - verify(mockTracingTelemetry).createSpan("span_name", mockParentSpan, attributes); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", attributes, mockParentSpan); + when(mockTracingTelemetry.createSpan(eq(spanCreationContext), eq(mockParentSpan))).thenReturn(mockSpan); + defaultTracer.startSpan(spanCreationContext); + verify(mockTracingTelemetry).createSpan(eq(spanCreationContext), eq(mockParentSpan)); } + @SuppressWarnings("unchecked") public void testCreateSpanWithAttributesWithParentMock() { DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); Attributes attributes = Attributes.create().addAttribute("name", "value"); - when(mockTracingTelemetry.createSpan("span_name", mockParentSpan, attributes)).thenReturn(mockSpan); - defaultTracer.startSpan("span_name", new SpanContext(mockParentSpan), attributes); - verify(mockTracingTelemetry).createSpan("span_name", mockParentSpan, attributes); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", attributes, mockParentSpan); + when(mockTracingTelemetry.createSpan(eq(spanCreationContext), eq(mockParentSpan))).thenReturn(mockSpan); + defaultTracer.startSpan(spanCreationContext); + verify(mockTracingTelemetry).createSpan(eq(spanCreationContext), eq(mockParentSpan)); verify(mockTracerContextStorage, never()).get(TracerContextStorage.CURRENT_SPAN); } public void testCreateSpanWithAttributes() { TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); DefaultTracer defaultTracer = new DefaultTracer( tracingTelemetry, - new ThreadContextBasedTracerContextStorage(new ThreadContext(Settings.EMPTY), tracingTelemetry) + new ThreadContextBasedTracerContextStorage(threadContext, tracingTelemetry) ); - defaultTracer.startSpan( + SpanCreationContext spanCreationContext = buildSpanCreationContext( "span_name", - Attributes.create().addAttribute("key1", 1.0).addAttribute("key2", 2l).addAttribute("key3", true).addAttribute("key4", "key4") + Attributes.create().addAttribute("key1", 1.0).addAttribute("key2", 2l).addAttribute("key3", true).addAttribute("key4", "key4"), + null ); - Assert.assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); - Assert.assertEquals(1.0, ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key1")); - Assert.assertEquals(2l, ((MockSpan) 
defaultTracer.getCurrentSpan().getSpan()).getAttribute("key2")); - Assert.assertEquals(true, ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key3")); - Assert.assertEquals("key4", ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key4")); + Span span = defaultTracer.startSpan(spanCreationContext); + + assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(1.0, ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key1")); + assertEquals(2l, ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key2")); + assertEquals(true, ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key3")); + assertEquals("key4", ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("key4")); + span.endSpan(); } public void testCreateSpanWithParent() { @@ -94,35 +117,277 @@ public void testCreateSpanWithParent() { new ThreadContextBasedTracerContextStorage(new ThreadContext(Settings.EMPTY), tracingTelemetry) ); - defaultTracer.startSpan("span_name", null); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", null, null); + + Span span = defaultTracer.startSpan(spanCreationContext, null); SpanContext parentSpan = defaultTracer.getCurrentSpan(); - defaultTracer.startSpan("span_name_1", parentSpan, Attributes.EMPTY); + SpanCreationContext spanCreationContext1 = buildSpanCreationContext("span_name_1", Attributes.EMPTY, parentSpan.getSpan()); + + Span span1 = defaultTracer.startSpan(spanCreationContext1); - Assert.assertEquals("span_name_1", defaultTracer.getCurrentSpan().getSpan().getSpanName()); - Assert.assertEquals(parentSpan.getSpan(), defaultTracer.getCurrentSpan().getSpan().getParentSpan()); + assertEquals("span_name_1", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(parentSpan.getSpan(), defaultTracer.getCurrentSpan().getSpan().getParentSpan()); + span1.endSpan(); + span.endSpan(); + } + + @SuppressWarnings("unchecked") + public void testCreateSpanWithContext() { + DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); + Attributes attributes = Attributes.create().addAttribute("name", "value"); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", attributes, mockParentSpan); + when(mockTracingTelemetry.createSpan(eq(spanCreationContext), eq(mockParentSpan))).thenReturn(mockSpan); + defaultTracer.startSpan(spanCreationContext); + verify(mockTracingTelemetry).createSpan(eq(spanCreationContext), eq(mockParentSpan)); } public void testCreateSpanWithNullParent() { TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); DefaultTracer defaultTracer = new DefaultTracer( tracingTelemetry, - new ThreadContextBasedTracerContextStorage(new ThreadContext(Settings.EMPTY), tracingTelemetry) + new ThreadContextBasedTracerContextStorage(threadContext, tracingTelemetry) ); - defaultTracer.startSpan("span_name", null, Attributes.EMPTY); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", Attributes.EMPTY, null); - Assert.assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); - Assert.assertEquals(null, defaultTracer.getCurrentSpan().getSpan().getParentSpan()); + Span span = defaultTracer.startSpan(spanCreationContext); + + assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(null, 
defaultTracer.getCurrentSpan().getSpan().getParentSpan()); + span.endSpan(); } - public void testEndSpanByClosingScope() { - DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); - try (SpanScope spanScope = defaultTracer.startSpan("span_name")) { - verify(mockTracerContextStorage).put(TracerContextStorage.CURRENT_SPAN, mockSpan); + public void testEndSpanByClosingScopedSpan() { + TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + ThreadContextBasedTracerContextStorage spanTracerStorage = new ThreadContextBasedTracerContextStorage( + threadContext, + tracingTelemetry + ); + DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", Attributes.EMPTY, null); + + ScopedSpan scopedSpan = defaultTracer.startScopedSpan(spanCreationContext); + + assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(((DefaultScopedSpan) scopedSpan).getSpanScope(), DefaultSpanScope.getCurrentSpanScope()); + scopedSpan.close(); + assertTrue(((MockSpan) ((DefaultScopedSpan) scopedSpan).getSpan()).hasEnded()); + assertEquals(null, defaultTracer.getCurrentSpan()); + assertEquals(null, DefaultSpanScope.getCurrentSpanScope()); + + } + + public void testEndSpanByClosingScopedSpanMultiple() { + TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + ThreadContextBasedTracerContextStorage spanTracerStorage = new ThreadContextBasedTracerContextStorage( + threadContext, + tracingTelemetry + ); + DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", Attributes.EMPTY, null); + SpanCreationContext spanCreationContext1 = buildSpanCreationContext("span_name_1", Attributes.EMPTY, null); + + ScopedSpan scopedSpan = defaultTracer.startScopedSpan(spanCreationContext); + ScopedSpan scopedSpan1 = defaultTracer.startScopedSpan(spanCreationContext1); + + assertEquals("span_name_1", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(((DefaultScopedSpan) scopedSpan1).getSpanScope(), DefaultSpanScope.getCurrentSpanScope()); + + scopedSpan1.close(); + assertTrue(((MockSpan) ((DefaultScopedSpan) scopedSpan1).getSpan()).hasEnded()); + assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(((DefaultScopedSpan) scopedSpan).getSpanScope(), DefaultSpanScope.getCurrentSpanScope()); + + scopedSpan.close(); + assertTrue(((MockSpan) ((DefaultScopedSpan) scopedSpan).getSpan()).hasEnded()); + assertEquals(null, defaultTracer.getCurrentSpan()); + assertEquals(null, DefaultSpanScope.getCurrentSpanScope()); + + } + + public void testEndSpanByClosingSpanScope() { + TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + ThreadContextBasedTracerContextStorage spanTracerStorage = new ThreadContextBasedTracerContextStorage( + threadContext, + tracingTelemetry + ); + DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); + SpanCreationContext spanCreationContext = buildSpanCreationContext("span_name", Attributes.EMPTY, null); + Span span = defaultTracer.startSpan(spanCreationContext); + SpanScope spanScope = 
defaultTracer.withSpanInScope(span); + assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(spanScope, DefaultSpanScope.getCurrentSpanScope()); + + span.endSpan(); + spanScope.close(); + assertEquals(null, defaultTracer.getCurrentSpan()); + assertTrue(((MockSpan) span).hasEnded()); + + } + + public void testEndSpanByClosingSpanScopeMultiple() { + TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + ThreadContextBasedTracerContextStorage spanTracerStorage = new ThreadContextBasedTracerContextStorage( + threadContext, + tracingTelemetry + ); + DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); + Span span = defaultTracer.startSpan(buildSpanCreationContext("span_name", Attributes.EMPTY, null)); + Span span1 = defaultTracer.startSpan(buildSpanCreationContext("span_name_1", Attributes.EMPTY, null)); + SpanScope spanScope = defaultTracer.withSpanInScope(span); + SpanScope spanScope1 = defaultTracer.withSpanInScope(span1); + assertEquals("span_name_1", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(spanScope1, DefaultSpanScope.getCurrentSpanScope()); + + span1.endSpan(); + spanScope1.close(); + + assertEquals("span_name", defaultTracer.getCurrentSpan().getSpan().getSpanName()); + assertEquals(spanScope, DefaultSpanScope.getCurrentSpanScope()); + assertTrue(((MockSpan) span1).hasEnded()); + assertFalse(((MockSpan) span).hasEnded()); + span.endSpan(); + spanScope.close(); + + assertEquals(null, defaultTracer.getCurrentSpan()); + assertEquals(null, DefaultSpanScope.getCurrentSpanScope()); + assertTrue(((MockSpan) span).hasEnded()); + assertTrue(((MockSpan) span1).hasEnded()); + + } + + /** + * 1. CreateSpan in ThreadA (NotScopedSpan) + * 2. create Async task and pass the span + * 3. Scope.close + * 4. verify the current_span is still the same on async thread as the 2 + * 5. verify the main thread has current span as null. 
+ */ + public void testSpanAcrossThreads() { + TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + + ThreadContextBasedTracerContextStorage spanTracerStorage = new ThreadContextBasedTracerContextStorage( + threadContext, + tracingTelemetry + ); + DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); + + CompletableFuture asyncTask = CompletableFuture.runAsync(() -> { + // create a span + Span span = defaultTracer.startSpan(buildSpanCreationContext("span_name_t_1", Attributes.EMPTY, null)); + SpanScope spanScope = defaultTracer.withSpanInScope(span); + + CompletableFuture asyncTask1 = CompletableFuture.runAsync(() -> { + Span spanT2 = defaultTracer.startSpan(buildSpanCreationContext("span_name_t_2", Attributes.EMPTY, null)); + SpanScope spanScopeT2 = defaultTracer.withSpanInScope(spanT2); + assertEquals(spanT2, defaultTracer.getCurrentSpan().getSpan()); + + spanScopeT2.close(); + spanT2.endSpan(); + assertEquals(null, defaultTracer.getCurrentSpan()); + }, executorService); + asyncTask1.join(); + spanScope.close(); + span.endSpan(); + assertEquals(null, defaultTracer.getCurrentSpan()); + }, executorService); + asyncTask.join(); + } + + public void testSpanCloseOnThread2() { + TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + ThreadContextBasedTracerContextStorage spanTracerStorage = new ThreadContextBasedTracerContextStorage( + threadContext, + tracingTelemetry + ); + DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); + final Span span = defaultTracer.startSpan(buildSpanCreationContext("span_name_t1", Attributes.EMPTY, null)); + try (SpanScope spanScope = defaultTracer.withSpanInScope(span)) { + CompletableFuture asyncTask = CompletableFuture.runAsync(() -> async(new ActionListener() { + @Override + public void onResponse(Boolean response) { + try (SpanScope s = defaultTracer.withSpanInScope(span)) { + assertEquals(span, defaultTracer.getCurrentSpan().getSpan()); + } finally { + span.endSpan(); + } + } + + @Override + public void onFailure(Exception e) { + + } + }), executorService); + assertEquals(span, defaultTracer.getCurrentSpan().getSpan()); + asyncTask.join(); } - verify(mockTracerContextStorage).put(TracerContextStorage.CURRENT_SPAN, mockParentSpan); + assertEquals(null, defaultTracer.getCurrentSpan()); + } + + private void async(ActionListener actionListener) { + actionListener.onResponse(true); + } + + /** + * 1. CreateSpan in ThreadA (NotScopedSpan) + * 2. create Async task and pass the span + * 3. Inside Async task start a new span. + * 4. Scope.close + * 5. Parent Scope.close + * 6. verify the current_span is still the same on async thread as the 2 + * 7. verify the main thread has current span as null. 
+ */ + public void testSpanAcrossThreadsMultipleSpans() { + TracingTelemetry tracingTelemetry = new MockTracingTelemetry(); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + + ThreadContextBasedTracerContextStorage spanTracerStorage = new ThreadContextBasedTracerContextStorage( + threadContext, + tracingTelemetry + ); + DefaultTracer defaultTracer = new DefaultTracer(tracingTelemetry, spanTracerStorage); + + CompletableFuture asyncTask = CompletableFuture.runAsync(() -> { + // create a parent span + Span parentSpan = defaultTracer.startSpan(buildSpanCreationContext("p_span_name", Attributes.EMPTY, null)); + SpanScope parentSpanScope = defaultTracer.withSpanInScope(parentSpan); + // create a span + Span span = defaultTracer.startSpan(buildSpanCreationContext("span_name_t_1", Attributes.EMPTY, null)); + SpanScope spanScope = defaultTracer.withSpanInScope(span); + + CompletableFuture asyncTask1 = CompletableFuture.runAsync(() -> { + Span spanT2 = defaultTracer.startSpan(buildSpanCreationContext("span_name_t_2", Attributes.EMPTY, null)); + SpanScope spanScopeT2 = defaultTracer.withSpanInScope(spanT2); + Span spanT21 = defaultTracer.startSpan(buildSpanCreationContext("span_name_t_2", Attributes.EMPTY, null)); + SpanScope spanScopeT21 = defaultTracer.withSpanInScope(spanT21); + assertEquals(spanT21, defaultTracer.getCurrentSpan().getSpan()); + spanScopeT21.close(); + spanT21.endSpan(); + + spanScopeT2.close(); + spanT2.endSpan(); + + assertEquals(null, defaultTracer.getCurrentSpan()); + }, executorService); + + asyncTask1.join(); + + spanScope.close(); + span.endSpan(); + parentSpanScope.close(); + parentSpan.endSpan(); + assertEquals(null, defaultTracer.getCurrentSpan()); + }, executorService); + asyncTask.join(); } public void testClose() throws IOException { @@ -138,6 +403,7 @@ private void setupMocks() { mockTracingTelemetry = mock(TracingTelemetry.class); mockSpan = mock(Span.class); mockParentSpan = mock(Span.class); + mockSpanScope = mock(SpanScope.class); mockTracerContextStorage = mock(TracerContextStorage.class); when(mockSpan.getSpanName()).thenReturn("span_name"); when(mockSpan.getSpanId()).thenReturn("span_id"); @@ -145,7 +411,16 @@ private void setupMocks() { when(mockSpan.getParentSpan()).thenReturn(mockParentSpan); when(mockParentSpan.getSpanId()).thenReturn("parent_span_id"); when(mockParentSpan.getTraceId()).thenReturn("trace_id"); - when(mockTracerContextStorage.get(TracerContextStorage.CURRENT_SPAN)).thenReturn(mockParentSpan, mockSpan); - when(mockTracingTelemetry.createSpan(eq("span_name"), eq(mockParentSpan), any(Attributes.class))).thenReturn(mockSpan); + spanCreationContext = buildSpanCreationContext("span_name", Attributes.EMPTY, mockParentSpan); + when(mockTracerContextStorage.get(TracerContextStorage.CURRENT_SPAN)).thenReturn(mockSpan, mockParentSpan); + when(mockTracingTelemetry.createSpan(eq(spanCreationContext), eq(mockParentSpan))).thenReturn(mockSpan); + } + + private SpanCreationContext buildSpanCreationContext(String spanName, Attributes attributes, Span parentSpan) { + SpanCreationContext spanCreationContext = SpanCreationContext.internal().name(spanName).attributes(attributes); + if (parentSpan != null) { + spanCreationContext.parent(new SpanContext(parentSpan)); + } + return spanCreationContext; } } diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/TraceableRunnableTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/TraceableRunnableTests.java index f1df3b24e1c9b..4c4f762653d57 100644 --- 
a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/TraceableRunnableTests.java +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/TraceableRunnableTests.java @@ -28,42 +28,53 @@ public class TraceableRunnableTests extends OpenSearchTestCase { public void testRunnableWithNullParent() throws Exception { String spanName = "testRunnable"; - DefaultTracer defaultTracer = new DefaultTracer(new MockTracingTelemetry(), contextStorage); + final DefaultTracer defaultTracer = new DefaultTracer(new MockTracingTelemetry(), contextStorage); final AtomicBoolean isRunnableCompleted = new AtomicBoolean(false); - + final AtomicReference spanNameCaptured = new AtomicReference<>(); + final AtomicReference attributeValue = new AtomicReference<>(); TraceableRunnable traceableRunnable = new TraceableRunnable( defaultTracer, - spanName, - null, - Attributes.create().addAttribute("name", "value"), + SpanCreationContext.internal().name(spanName).attributes(Attributes.create().addAttribute("name", "value")), () -> { + spanNameCaptured.set(defaultTracer.getCurrentSpan().getSpan().getSpanName()); + attributeValue.set((String) ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("name")); isRunnableCompleted.set(true); } ); traceableRunnable.run(); assertTrue(isRunnableCompleted.get()); - assertEquals(spanName, defaultTracer.getCurrentSpan().getSpan().getSpanName()); - assertEquals(null, defaultTracer.getCurrentSpan().getSpan().getParentSpan()); - assertEquals("value", ((MockSpan) defaultTracer.getCurrentSpan().getSpan()).getAttribute("name")); - + assertEquals(spanName, spanNameCaptured.get()); + assertEquals(null, defaultTracer.getCurrentSpan()); + assertEquals(null, defaultTracer.getCurrentSpan()); + assertEquals("value", attributeValue.get()); } public void testRunnableWithParent() throws Exception { String spanName = "testRunnable"; String parentSpanName = "parentSpan"; DefaultTracer defaultTracer = new DefaultTracer(new MockTracingTelemetry(), contextStorage); - defaultTracer.startSpan(parentSpanName); - SpanContext parentSpan = defaultTracer.getCurrentSpan(); - AtomicReference currrntSpan = new AtomicReference<>(new SpanContext(null)); + ScopedSpan scopedSpan = defaultTracer.startScopedSpan( + SpanCreationContext.internal().name(parentSpanName).attributes(Attributes.EMPTY) + ); + SpanContext parentSpanContext = defaultTracer.getCurrentSpan(); + AtomicReference currentSpan = new AtomicReference<>(); final AtomicBoolean isRunnableCompleted = new AtomicBoolean(false); - TraceableRunnable traceableRunnable = new TraceableRunnable(defaultTracer, spanName, parentSpan, Attributes.EMPTY, () -> { - isRunnableCompleted.set(true); - currrntSpan.set(defaultTracer.getCurrentSpan()); - }); + TraceableRunnable traceableRunnable = new TraceableRunnable( + defaultTracer, + SpanCreationContext.internal() + .name(spanName) + .attributes(Attributes.create().addAttribute("name", "value")) + .parent(parentSpanContext), + () -> { + isRunnableCompleted.set(true); + currentSpan.set(defaultTracer.getCurrentSpan()); + } + ); traceableRunnable.run(); assertTrue(isRunnableCompleted.get()); - assertEquals(spanName, currrntSpan.get().getSpan().getSpanName()); - assertEquals(parentSpan.getSpan(), currrntSpan.get().getSpan().getParentSpan()); - assertEquals(parentSpan.getSpan(), defaultTracer.getCurrentSpan().getSpan()); + assertEquals(spanName, currentSpan.get().getSpan().getSpanName()); + assertEquals(((DefaultScopedSpan) scopedSpan).getSpan(), currentSpan.get().getSpan().getParentSpan()); + 
assertEquals(((DefaultScopedSpan) scopedSpan).getSpan(), defaultTracer.getCurrentSpan().getSpan()); + scopedSpan.close(); } } diff --git a/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java index 44a4fe4143d05..eae7e522431de 100644 --- a/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java @@ -609,7 +609,7 @@ public void testCreateRootSubParser() throws IOException { /** * Generates a random object {"first_field": "foo", "marked_field": {...random...}, "last_field": "bar} - * + *
<p>
    * Returns the number of tokens in the marked field */ private static int generateRandomObjectForMarking(XContentBuilder builder) throws IOException { diff --git a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java index f7ab0db3c9607..24f74f3859157 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java +++ b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java @@ -89,4 +89,9 @@ protected Aggregator doCreateInternal( } return new MatrixStatsAggregator(name, typedValuesSources, searchContext, parent, multiValueMode, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/RunningStats.java b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/RunningStats.java index de67cc2930652..de6b59b1546a5 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/RunningStats.java +++ b/modules/aggs-matrix-stats/src/main/java/org/opensearch/search/aggregations/matrix/stats/RunningStats.java @@ -46,7 +46,7 @@ /** * Descriptive stats gathered per shard. Coordinating node computes final correlation and covariance stats * based on these descriptive stats. This single pass, parallel approach is based on: - * + *
<p>
    * http://prod.sandia.gov/techlib/access-control.cgi/2008/086212.pdf */ public class RunningStats implements Writeable, Cloneable { @@ -222,7 +222,7 @@ private void updateCovariance(final String[] fieldNames, final Map * running computations taken from: http://prod.sandia.gov/techlib/access-control.cgi/2008/086212.pdf **/ public void merge(final RunningStats other) { diff --git a/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java b/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java index 8c2f83bf83d85..7b1db7d648ebd 100644 --- a/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java +++ b/modules/analysis-common/src/internalClusterTest/java/org/opensearch/analysis/common/QueryStringWithAnalyzersIT.java @@ -32,20 +32,42 @@ package org.opensearch.analysis.common; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.Operator; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; import static org.opensearch.index.query.QueryBuilders.queryStringQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -public class QueryStringWithAnalyzersIT extends OpenSearchIntegTestCase { +public class QueryStringWithAnalyzersIT extends ParameterizedOpenSearchIntegTestCase { + + public QueryStringWithAnalyzersIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Collection> nodePlugins() { return Arrays.asList(CommonAnalysisPlugin.class); diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisPlugin.java index ac68b547e06f1..8a6f0b045b275 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisPlugin.java @@ -89,6 +89,7 @@ import org.apache.lucene.analysis.lt.LithuanianAnalyzer; import org.apache.lucene.analysis.lv.LatvianAnalyzer; import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter; +import org.apache.lucene.analysis.miscellaneous.DelimitedTermFrequencyTokenFilter; import org.apache.lucene.analysis.miscellaneous.DisableGraphAttribute; import org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilter; import 
org.apache.lucene.analysis.miscellaneous.LengthFilter; @@ -265,6 +266,7 @@ public Map> getTokenFilters() { ); filters.put("decimal_digit", DecimalDigitFilterFactory::new); filters.put("delimited_payload", DelimitedPayloadTokenFilterFactory::new); + filters.put("delimited_term_freq", DelimitedTermFrequencyTokenFilterFactory::new); filters.put("dictionary_decompounder", requiresAnalysisSettings(DictionaryCompoundWordTokenFilterFactory::new)); filters.put("dutch_stem", DutchStemTokenFilterFactory::new); filters.put("edge_ngram", EdgeNGramTokenFilterFactory::new); @@ -483,6 +485,13 @@ public List getPreConfiguredTokenFilters() { ) ) ); + filters.add( + PreConfiguredTokenFilter.singleton( + "delimited_term_freq", + false, + input -> new DelimitedTermFrequencyTokenFilter(input, DelimitedTermFrequencyTokenFilterFactory.DEFAULT_DELIMITER) + ) + ); filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer()))); filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, false, input -> new EdgeNGramTokenFilter(input, 1))); filters.add(PreConfiguredTokenFilter.openSearchVersion("edgeNGram", false, false, (reader, version) -> { @@ -547,7 +556,7 @@ public List getPreConfiguredTokenFilters() { filters.add(PreConfiguredTokenFilter.singleton("scandinavian_normalization", true, ScandinavianNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("shingle", false, false, input -> { TokenStream ts = new ShingleFilter(input); - /** + /* * We disable the graph analysis on this token stream * because it produces shingles of different size. * Graph analysis on such token stream is useless and dangerous as it may create too many paths diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/DelimitedTermFrequencyTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/DelimitedTermFrequencyTokenFilterFactory.java new file mode 100644 index 0000000000000..8929a7c54ef4c --- /dev/null +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/DelimitedTermFrequencyTokenFilterFactory.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.analysis.common; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.miscellaneous.DelimitedTermFrequencyTokenFilter; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.Environment; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.analysis.AbstractTokenFilterFactory; + +public class DelimitedTermFrequencyTokenFilterFactory extends AbstractTokenFilterFactory { + public static final char DEFAULT_DELIMITER = '|'; + private static final String DELIMITER = "delimiter"; + private final char delimiter; + + DelimitedTermFrequencyTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + super(indexSettings, name, settings); + delimiter = parseDelimiter(settings); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + return new DelimitedTermFrequencyTokenFilter(tokenStream, delimiter); + } + + private static char parseDelimiter(Settings settings) { + String delimiter = settings.get(DELIMITER); + if (delimiter == null) { + return DEFAULT_DELIMITER; + } else if (delimiter.length() == 1) { + return delimiter.charAt(0); + } + + throw new IllegalArgumentException( + "Setting [" + DELIMITER + "] must be a single, non-null character. [" + delimiter + "] was provided." + ); + } +} diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/KeywordMarkerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/KeywordMarkerTokenFilterFactory.java index ad968aeee62cb..e9f3fd96dd69d 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/KeywordMarkerTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/KeywordMarkerTokenFilterFactory.java @@ -49,12 +49,12 @@ * A factory for creating keyword marker token filters that prevent tokens from * being modified by stemmers. Two types of keyword marker filters are available: * the {@link SetKeywordMarkerFilter} and the {@link PatternKeywordMarkerFilter}. - * + *
<p>
    * The {@link SetKeywordMarkerFilter} uses a set of keywords to denote which tokens * should be excluded from stemming. This filter is created if the settings include * {@code keywords}, which contains the list of keywords, or {@code `keywords_path`}, * which contains a path to a file in the config directory with the keywords. - * + *
<p>
    * The {@link PatternKeywordMarkerFilter} uses a regular expression pattern to match * against tokens that should be excluded from stemming. This filter is created if * the settings include {@code keywords_pattern}, which contains the regular expression diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MappingCharFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MappingCharFilterFactory.java index bd241de749f11..d6d9f8975f2fc 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MappingCharFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/MappingCharFilterFactory.java @@ -54,7 +54,7 @@ public class MappingCharFilterFactory extends AbstractCharFilterFactory implemen MappingCharFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name); - List> rules = Analysis.parseWordList(env, settings, "mappings", this::parse, false); + List> rules = Analysis.parseWordList(env, settings, "mappings", this::parse); if (rules == null) { throw new IllegalArgumentException("mapping requires either `mappings` or `mappings_path` to be configured"); } diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SnowballAnalyzer.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SnowballAnalyzer.java index 78d151ee16c3b..04786689b50f0 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SnowballAnalyzer.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/SnowballAnalyzer.java @@ -45,7 +45,7 @@ /** Filters {@link StandardTokenizer} with {@link * LowerCaseFilter}, {@link StopFilter} and {@link SnowballFilter}. - * + *
<p>
    * Available stemmers are listed in org.tartarus.snowball.ext. The name of a * stemmer is the part of the class name before "Stemmer", e.g., the stemmer in * {@link org.tartarus.snowball.ext.EnglishStemmer} is named "English". diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java index 2c3864a36fd22..d28155272a9db 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/BaseWordDelimiterTokenFilterFactoryTestCase.java @@ -211,8 +211,8 @@ private void createTokenFilterFactoryWithTypeTable(String[] rules) throws IOExce } public void testTypeTableParsingError() { - String[] rules = { "# This is a comment", "$ => DIGIT", "\\u200D => ALPHANUM", "abc => ALPHA" }; + String[] rules = { "# This is a comment", "# => ALPHANUM", "$ => DIGIT", "\\u200D => ALPHANUM", "abc => ALPHA" }; RuntimeException ex = expectThrows(RuntimeException.class, () -> createTokenFilterFactoryWithTypeTable(rules)); - assertEquals("Line [4]: Invalid mapping rule: [abc => ALPHA]. Only a single character is allowed.", ex.getMessage()); + assertEquals("Line [5]: Invalid mapping rule: [abc => ALPHA]. Only a single character is allowed.", ex.getMessage()); } } diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java index 4cf0d1de28717..e0e99cdc31672 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CommonAnalysisFactoryTests.java @@ -145,6 +145,7 @@ protected Map> getTokenFilters() { filters.put("cjkwidth", CJKWidthFilterFactory.class); filters.put("cjkbigram", CJKBigramFilterFactory.class); filters.put("delimitedpayload", DelimitedPayloadTokenFilterFactory.class); + filters.put("delimitedtermfrequency", DelimitedTermFrequencyTokenFilterFactory.class); filters.put("keepword", KeepWordFilterFactory.class); filters.put("type", KeepTypesFilterFactory.class); filters.put("classic", ClassicFilterFactory.class); @@ -202,6 +203,7 @@ protected Map> getPreConfiguredTokenFilters() { filters.put("decimal_digit", null); filters.put("delimited_payload_filter", org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterFactory.class); filters.put("delimited_payload", org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterFactory.class); + filters.put("delimited_term_freq", org.apache.lucene.analysis.miscellaneous.DelimitedTermFrequencyTokenFilterFactory.class); filters.put("dutch_stem", SnowballPorterFilterFactory.class); filters.put("edge_ngram", null); filters.put("edgeNGram", null); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DelimitedTermFrequencyTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DelimitedTermFrequencyTokenFilterFactoryTests.java new file mode 100644 index 0000000000000..bb8698e535a62 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/DelimitedTermFrequencyTokenFilterFactoryTests.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * 
The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.analysis.common; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.core.WhitespaceTokenizer; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.TermFrequencyAttribute; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.Environment; +import org.opensearch.index.analysis.AnalysisTestsHelper; +import org.opensearch.index.analysis.TokenFilterFactory; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.OpenSearchTokenStreamTestCase; + +import java.io.StringReader; + +public class DelimitedTermFrequencyTokenFilterFactoryTests extends OpenSearchTokenStreamTestCase { + + public void testDefault() throws Exception { + OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( + Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put("index.analysis.filter.my_delimited_term_freq.type", "delimited_term_freq") + .build(), + new CommonAnalysisPlugin() + ); + doTest(analysis, "cat|4 dog|5"); + } + + public void testDelimiter() throws Exception { + OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings( + Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put("index.analysis.filter.my_delimited_term_freq.type", "delimited_term_freq") + .put("index.analysis.filter.my_delimited_term_freq.delimiter", ":") + .build(), + new CommonAnalysisPlugin() + ); + doTest(analysis, "cat:4 dog:5"); + } + + public void testDelimiterLongerThanOneCharThrows() { + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> AnalysisTestsHelper.createTestAnalysisFromSettings( + Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put("index.analysis.filter.my_delimited_term_freq.type", "delimited_term_freq") + .put("index.analysis.filter.my_delimited_term_freq.delimiter", "^^") + .build(), + new CommonAnalysisPlugin() + ) + ); + + assertEquals("Setting [delimiter] must be a single, non-null character. 
[^^] was provided.", ex.getMessage()); + } + + private void doTest(OpenSearchTestCase.TestAnalysis analysis, String source) throws Exception { + TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_delimited_term_freq"); + Tokenizer tokenizer = new WhitespaceTokenizer(); + tokenizer.setReader(new StringReader(source)); + + TokenStream stream = tokenFilter.create(tokenizer); + + CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class); + TermFrequencyAttribute tfAtt = stream.getAttribute(TermFrequencyAttribute.class); + stream.reset(); + assertTermEquals("cat", stream, termAtt, tfAtt, 4); + assertTermEquals("dog", stream, termAtt, tfAtt, 5); + assertFalse(stream.incrementToken()); + stream.end(); + stream.close(); + } + + void assertTermEquals(String expected, TokenStream stream, CharTermAttribute termAtt, TermFrequencyAttribute tfAtt, int expectedTf) + throws Exception { + assertTrue(stream.incrementToken()); + assertEquals(expected, termAtt.toString()); + assertEquals(expectedTf, tfAtt.getTermFrequency()); + } +} diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java index 65cf6e21071a9..0ffb7f2114f28 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/HighlighterWithAnalyzersTests.java @@ -32,8 +32,11 @@ package org.opensearch.analysis.common; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; @@ -41,7 +44,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -55,6 +58,7 @@ import static org.opensearch.index.query.QueryBuilders.matchPhraseQuery; import static org.opensearch.index.query.QueryBuilders.matchQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.builder.SearchSourceBuilder.highlight; import static org.opensearch.search.builder.SearchSourceBuilder.searchSource; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -64,7 +68,25 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.startsWith; -public class HighlighterWithAnalyzersTests extends OpenSearchIntegTestCase { +public class HighlighterWithAnalyzersTests extends ParameterizedOpenSearchIntegTestCase { + + public HighlighterWithAnalyzersTests(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), 
true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Collection> nodePlugins() { return Arrays.asList(CommonAnalysisPlugin.class); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MappingCharFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MappingCharFilterFactoryTests.java index 843a3cecd56c0..2ef9f4e91655f 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MappingCharFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/MappingCharFilterFactoryTests.java @@ -37,6 +37,7 @@ public static CharFilterFactory create(String... rules) throws IOException { public void testRulesOk() throws IOException { MappingCharFilterFactory mappingCharFilterFactory = (MappingCharFilterFactory) create( + "# This is a comment", "# => _hashtag_", ":) => _happy_", ":( => _sad_" @@ -64,7 +65,10 @@ public void testRuleError() { } public void testRulePartError() { - RuntimeException ex = expectThrows(RuntimeException.class, () -> create("# => _hashtag_", ":) => _happy_", "a:b")); - assertEquals("Line [3]: Invalid mapping rule : [a:b]", ex.getMessage()); + RuntimeException ex = expectThrows( + RuntimeException.class, + () -> create("# This is a comment", "# => _hashtag_", ":) => _happy_", "a:b") + ); + assertEquals("Line [4]: Invalid mapping rule : [a:b]", ex.getMessage()); } } diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml index 40c82ff185661..802c79c780689 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml @@ -127,6 +127,69 @@ - match: { tokens.2.token: brown } - match: { tokens.3.token: fox } + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter + split_on_numerics: false + type_table: + - "\\u0023 => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter + split_on_numerics: false + type_table: + - "# This is a comment" + - "# => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter + split_on_numerics: false + type_table: + - "# This is a comment" + - "# => ALPHANUM" + - "@ => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { 
tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + --- "word_delimiter_graph": - do: @@ -231,6 +294,69 @@ - match: { detail.tokenfilters.0.tokens.5.end_offset: 19 } - match: { detail.tokenfilters.0.tokens.5.position: 5 } + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter_graph + split_on_numerics: false + type_table: + - "\\u0023 => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter_graph + split_on_numerics: false + type_table: + - "# This is a comment" + - "# => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + + - do: + indices.analyze: + body: + text: 'text1 #text2' + tokenizer: whitespace + filter: + - type: word_delimiter_graph + split_on_numerics: false + type_table: + - "# This is a comment" + - "# => ALPHANUM" + - "@ => ALPHANUM" + - length: { tokens: 2 } + - match: { tokens.0.token: text1 } + - match: { tokens.0.start_offset: 0 } + - match: { tokens.0.end_offset: 5 } + - match: { tokens.0.position: 0 } + - match: { tokens.1.token: "#text2" } + - match: { tokens.1.start_offset: 6 } + - match: { tokens.1.end_offset: 12 } + - match: { tokens.1.position: 1 } + --- "unique": - do: @@ -1198,6 +1324,46 @@ - match: { tokens.0.token: foo } --- +"delimited_term_freq": + - skip: + version: " - 2.9.99" + reason: "delimited_term_freq token filter was added in v2.10.0" + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + my_delimited_term_freq: + type: delimited_term_freq + delimiter: ^ + - do: + indices.analyze: + index: test + body: + text: foo^3 + tokenizer: keyword + filter: [my_delimited_term_freq] + attributes: termFrequency + explain: true + - length: { detail.tokenfilters: 1 } + - match: { detail.tokenfilters.0.tokens.0.token: foo } + - match: { detail.tokenfilters.0.tokens.0.termFrequency: 3 } + + # Test pre-configured token filter too: + - do: + indices.analyze: + body: + text: foo|100 + tokenizer: keyword + filter: [delimited_term_freq] + attributes: termFrequency + explain: true + - length: { detail.tokenfilters: 1 } + - match: { detail.tokenfilters.0.tokens.0.token: foo } + - match: { detail.tokenfilters.0.tokens.0.termFrequency: 100 } +--- "keep_filter": - do: indices.create: diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/50_char_filters.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/50_char_filters.yml index 0078575ae8e57..5e266c10cba8f 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/50_char_filters.yml +++ 
b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/50_char_filters.yml @@ -69,6 +69,7 @@ char_filter: - type: mapping mappings: + - "# This is a comment" - "# => _hashsign_" - "@ => _atsign_" - length: { tokens: 3 } diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java index b17f4804d4d50..c38b29502e282 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/GeoModulePluginIntegTestCase.java @@ -8,26 +8,50 @@ package org.opensearch.geo; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.geometry.utils.StandardValidator; import org.opensearch.geometry.utils.WellKnownText; import org.opensearch.index.mapper.GeoShapeFieldMapper; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; + /** * This is the base class for all the Geo related integration tests. Use this class to add the features and settings * for the test cluster on which integration tests are running. */ -public abstract class GeoModulePluginIntegTestCase extends OpenSearchIntegTestCase { +public abstract class GeoModulePluginIntegTestCase extends ParameterizedOpenSearchIntegTestCase { protected static final double GEOHASH_TOLERANCE = 1E-5D; protected static final WellKnownText WKT = new WellKnownText(true, new StandardValidator(true)); + public GeoModulePluginIntegTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + /** * Returns a collection of plugins that should be loaded on each node for doing the integration tests. As this * geo plugin is not getting packaged in a zip, we need to load it before the tests run. 
diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java index a9dd7d1fd22e7..7344903fd5220 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/MissingValueIT.java @@ -10,6 +10,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.settings.Settings; import org.opensearch.geo.GeoModulePluginIntegTestCase; import org.opensearch.geo.search.aggregations.common.GeoBoundsHelper; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; @@ -43,6 +44,10 @@ public class MissingValueIT extends GeoModulePluginIntegTestCase { private GeoPoint bottomRight; private GeoPoint topLeft; + public MissingValueIT(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override protected void setupSuiteScopeCluster() throws Exception { assertAcked( diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java index d9ff3e8f473ef..86d8ad2968e7f 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/AbstractGeoBucketAggregationIntegTest.java @@ -67,6 +67,10 @@ public abstract class AbstractGeoBucketAggregationIntegTest extends GeoModulePlu protected final Version version = VersionUtils.randomIndexCompatibleVersion(random()); + public AbstractGeoBucketAggregationIntegTest(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override protected boolean forbidPrivateIndexSettings() { return false; diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java index 459a0986d3103..4048bb62f8818 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoHashGridIT.java @@ -35,6 +35,7 @@ import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoShapeDocValue; +import org.opensearch.common.settings.Settings; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGrid; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; import org.opensearch.geo.search.aggregations.common.GeoBoundsHelper; @@ -64,6 +65,10 @@ public class GeoHashGridIT extends AbstractGeoBucketAggregationIntegTest { private static final String AGG_NAME = "geohashgrid"; + public GeoHashGridIT(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override public void setupSuiteScopeCluster() throws Exception { Random random = random(); diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoTileGridIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoTileGridIT.java index 6b09a843af566..2a5772d417530 
100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoTileGridIT.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/GeoTileGridIT.java @@ -12,6 +12,7 @@ import org.opensearch.common.geo.GeoBoundingBox; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoShapeDocValue; +import org.opensearch.common.settings.Settings; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGrid; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGridAggregationBuilder; import org.opensearch.geo.search.aggregations.common.GeoBoundsHelper; @@ -38,6 +39,10 @@ public class GeoTileGridIT extends AbstractGeoBucketAggregationIntegTest { private static final String AGG_NAME = "geotilegrid"; + public GeoTileGridIT(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override public void setupSuiteScopeCluster() throws Exception { final Random random = random(); diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/ShardReduceIT.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/ShardReduceIT.java index d22d2089a3ae3..85541c60f133c 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/ShardReduceIT.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/bucket/ShardReduceIT.java @@ -10,6 +10,7 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.geo.GeoModulePluginIntegTestCase; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGrid; import org.opensearch.geo.tests.common.AggregationBuilders; @@ -34,6 +35,10 @@ @OpenSearchIntegTestCase.SuiteScopeTestCase public class ShardReduceIT extends GeoModulePluginIntegTestCase { + public ShardReduceIT(Settings dynamicSettings) { + super(dynamicSettings); + } + private IndexRequestBuilder indexDoc(String date, int value) throws Exception { return client().prepareIndex("idx") .setSource( diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java index d76104882d676..711744b944ce3 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/AbstractGeoAggregatorModulePluginTestCase.java @@ -65,6 +65,10 @@ public abstract class AbstractGeoAggregatorModulePluginTestCase extends GeoModul protected static Map expectedDocCountsForGeoHash = null; protected static Map expectedCentroidsForGeoHash = null; + public AbstractGeoAggregatorModulePluginTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override public void setupSuiteScopeCluster() throws Exception { createIndex(UNMAPPED_IDX_NAME); diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsITTestCase.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsITTestCase.java index d95cd85b49cd4..1c28df6bc4ea2 100644 --- 
a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsITTestCase.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsITTestCase.java @@ -34,6 +34,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.util.BigArray; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.global.Global; @@ -61,6 +62,10 @@ public class GeoBoundsITTestCase extends AbstractGeoAggregatorModulePluginTestCase { private static final String aggName = "geoBounds"; + public GeoBoundsITTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + public void testSingleValuedField() throws Exception { SearchResponse response = client().prepareSearch(IDX_NAME) .addAggregation(geoBounds(aggName).field(SINGLE_VALUED_FIELD_NAME).wrapLongitude(false)) diff --git a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoCentroidITTestCase.java b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoCentroidITTestCase.java index 01d2656adb750..2dc8a91600419 100644 --- a/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoCentroidITTestCase.java +++ b/modules/geo/src/internalClusterTest/java/org/opensearch/geo/search/aggregations/metrics/GeoCentroidITTestCase.java @@ -34,6 +34,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.settings.Settings; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoGrid; import org.opensearch.geo.tests.common.AggregationBuilders; import org.opensearch.search.aggregations.metrics.GeoCentroid; @@ -51,6 +52,10 @@ public class GeoCentroidITTestCase extends AbstractGeoAggregatorModulePluginTestCase { private static final String aggName = "geoCentroid"; + public GeoCentroidITTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + public void testSingleValueFieldAsSubAggToGeohashGrid() throws Exception { SearchResponse response = client().prepareSearch(HIGH_CARD_IDX_NAME) .addAggregation( diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java index 9149b8939b739..665ea6c5f2f37 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileValuesSource.java @@ -48,7 +48,7 @@ /** * A {@link SingleDimensionValuesSource} for geotile values. - * + *
<p>
    * Since geotile values can be represented as long values, this class is almost the same as {@link LongValuesSource} * The main differences is {@link GeoTileValuesSource#setAfter(Comparable)} as it needs to accept geotile string values i.e. "zoom/x/y". * diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java index 5502e0c418cf4..b95b47a630916 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java @@ -196,4 +196,9 @@ static void registerAggregators(ValuesSourceRegistry.Builder builder) { true ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java index b8e3efbb891df..7253f45d2db10 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java @@ -194,4 +194,9 @@ static void registerAggregators(ValuesSourceRegistry.Builder builder) { true ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/cells/BoundedCellValues.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/cells/BoundedCellValues.java index 588c8bc59c2e0..6ff38fa28978e 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/cells/BoundedCellValues.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/cells/BoundedCellValues.java @@ -37,7 +37,7 @@ /** * Class representing {@link CellValues} whose values are filtered * according to whether they are within the specified {@link GeoBoundingBox}. - * + *
<p>
    * The specified bounding box is assumed to be bounded. * * @opensearch.internal diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java index 780f25ba3d7fb..fc9cce3cf98c1 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/metrics/GeoBoundsAggregatorFactory.java @@ -89,4 +89,9 @@ static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register(GeoBoundsAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.GEOPOINT, GeoBoundsAggregator::new, true); builder.register(GeoBoundsAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.GEO_SHAPE, GeoBoundsGeoShapeAggregator::new, true); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FailProcessorException.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FailProcessorException.java index 37320c0e900a5..7e114023fb86f 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FailProcessorException.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FailProcessorException.java @@ -37,7 +37,7 @@ /** * Exception class thrown by {@link FailProcessor}. - * + *
<p>
    * This exception is caught in the {@link CompoundProcessor} and * then changes the state of {@link IngestDocument}. This * exception should get serialized. diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ForEachProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ForEachProcessor.java index 741a4fb29cfb8..b7c417f5f44a5 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ForEachProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ForEachProcessor.java @@ -53,10 +53,10 @@ /** * A processor that for each value in a list executes a one or more processors. - * + *
<p>
    * This can be useful in cases to do string operations on json array of strings, * or remove a field from objects inside a json array. - * + *
<p>
    * Note that this processor is experimental. */ public final class ForEachProcessor extends AbstractProcessor implements WrappingProcessor { diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java index 5da3b6bea7bc2..93a35eef4d396 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java @@ -32,6 +32,7 @@ package org.opensearch.ingest.common; +import org.opensearch.core.common.Strings; import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.ingest.IngestDocument; @@ -66,16 +67,20 @@ public List getFields() { @Override public IngestDocument execute(IngestDocument document) { - if (ignoreMissing) { - fields.forEach(field -> { - String path = document.renderTemplate(field); - if (document.hasField(path)) { - document.removeField(path); + fields.forEach(field -> { + String path = document.renderTemplate(field); + final boolean fieldPathIsNullOrEmpty = Strings.isNullOrEmpty(path); + if (fieldPathIsNullOrEmpty || document.hasField(path) == false) { + if (ignoreMissing) { + return; + } else if (fieldPathIsNullOrEmpty) { + throw new IllegalArgumentException("field path cannot be null nor empty"); + } else { + throw new IllegalArgumentException("field [" + path + "] doesn't exist"); } - }); - } else { - fields.forEach(document::removeField); - } + } + document.removeField(path); + }); return document; } diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RenameProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RenameProcessor.java index af356eb10d79c..7564bbdf95f45 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RenameProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RenameProcessor.java @@ -32,6 +32,7 @@ package org.opensearch.ingest.common; +import org.opensearch.core.common.Strings; import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.ingest.IngestDocument; @@ -80,9 +81,12 @@ boolean isIgnoreMissing() { @Override public IngestDocument execute(IngestDocument document) { String path = document.renderTemplate(field); - if (document.hasField(path, true) == false) { + final boolean fieldPathIsNullOrEmpty = Strings.isNullOrEmpty(path); + if (fieldPathIsNullOrEmpty || document.hasField(path, true) == false) { if (ignoreMissing) { return document; + } else if (fieldPathIsNullOrEmpty) { + throw new IllegalArgumentException("field path cannot be null nor empty"); } else { throw new IllegalArgumentException("field [" + path + "] doesn't exist"); } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java index cf65236157111..8f729c6a39bbd 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java @@ -42,7 +42,6 @@ import java.util.HashMap; import java.util.Map; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class RemoveProcessorTests extends OpenSearchTestCase { 
@@ -67,12 +66,44 @@ public void testRemoveNonExistingField() throws Exception { config.put("field", fieldName); String processorTag = randomAlphaOfLength(10); Processor processor = new RemoveProcessor.Factory(TestTemplateService.instance()).create(null, processorTag, null, config); - try { - processor.execute(ingestDocument); - fail("remove field should have failed"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("not present as part of path [" + fieldName + "]")); - } + assertThrows( + "field [" + fieldName + "] doesn't exist", + IllegalArgumentException.class, + () -> { processor.execute(ingestDocument); } + ); + + Map configWithEmptyField = new HashMap<>(); + configWithEmptyField.put("field", ""); + processorTag = randomAlphaOfLength(10); + Processor removeProcessorWithEmptyField = new RemoveProcessor.Factory(TestTemplateService.instance()).create( + null, + processorTag, + null, + configWithEmptyField + ); + assertThrows( + "field path cannot be null nor empty", + IllegalArgumentException.class, + () -> removeProcessorWithEmptyField.execute(ingestDocument) + ); + } + + public void testRemoveEmptyField() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); + Map config = new HashMap<>(); + config.put("field", ""); + String processorTag = randomAlphaOfLength(10); + Processor removeProcessorWithEmptyField = new RemoveProcessor.Factory(TestTemplateService.instance()).create( + null, + processorTag, + null, + config + ); + assertThrows( + "field path cannot be null nor empty", + IllegalArgumentException.class, + () -> removeProcessorWithEmptyField.execute(ingestDocument) + ); } public void testIgnoreMissing() throws Exception { @@ -84,5 +115,13 @@ public void testIgnoreMissing() throws Exception { String processorTag = randomAlphaOfLength(10); Processor processor = new RemoveProcessor.Factory(TestTemplateService.instance()).create(null, processorTag, null, config); processor.execute(ingestDocument); + + // when using template snippet, the resolved field path maybe empty + Map configWithEmptyField = new HashMap<>(); + configWithEmptyField.put("field", ""); + configWithEmptyField.put("ignore_missing", true); + processorTag = randomAlphaOfLength(10); + processor = new RemoveProcessor.Factory(TestTemplateService.instance()).create(null, processorTag, null, configWithEmptyField); + processor.execute(ingestDocument); } } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RenameProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RenameProcessorTests.java index fc95693024cb0..a600464371af8 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RenameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RenameProcessorTests.java @@ -112,6 +112,15 @@ public void testRenameNonExistingField() throws Exception { } catch (IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("field [" + fieldName + "] doesn't exist")); } + + // when using template snippet, the resolved field path maybe empty + processor = createRenameProcessor("", RandomDocumentPicks.randomFieldName(random()), false); + try { + processor.execute(ingestDocument); + fail("processor execute should have failed"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("field path cannot be null nor empty")); + } } public void testRenameNonExistingFieldWithIgnoreMissing() throws 
Exception { @@ -121,6 +130,11 @@ public void testRenameNonExistingFieldWithIgnoreMissing() throws Exception { Processor processor = createRenameProcessor(fieldName, RandomDocumentPicks.randomFieldName(random()), true); processor.execute(ingestDocument); assertIngestDocument(originalIngestDocument, ingestDocument); + + // when using template snippet, the resolved field path maybe empty + processor = createRenameProcessor("", RandomDocumentPicks.randomFieldName(random()), true); + processor.execute(ingestDocument); + assertIngestDocument(originalIngestDocument, ingestDocument); } public void testRenameNewFieldAlreadyExists() throws Exception { diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/280_rename_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/280_rename_processor.yml new file mode 100644 index 0000000000000..96b2256bcc1dc --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/280_rename_processor.yml @@ -0,0 +1,66 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test rename processor with non-existing field and without ignore_missing": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "rename" : { + "field" : "{{field_foo}}", + "target_field" : "bar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: '/field path cannot be null nor empty/' + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { message: "foo bar baz" } + +--- +"Test rename processor with non-existing field and ignore_missing": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "rename" : { + "field" : "{{field_foo}}", + "target_field" : "bar", + "ignore_missing" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { message: "foo bar baz" } + + - do: + get: + index: test + id: 1 + - match: { _source.message: "foo bar baz" } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml new file mode 100644 index 0000000000000..ff5a17136afa2 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml @@ -0,0 +1,93 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test remove processor with non-existing field and without ignore_missing": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "{{unknown}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /field path cannot be null nor empty/ + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { message: "foo bar baz" } + +--- +"Test remove processor with resolved field path doesn't exist": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "{{foo}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /field \[bar\] doesn\'t exist/ + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + message: "foo bar baz", + foo: "bar" + } + +--- +"Test remove processor 
with non-existing field and ignore_missing": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "{{unknown}}", + "ignore_missing" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { message: "foo bar baz" } + + - do: + get: + index: test + id: 1 + - match: { _source.message: "foo bar baz" } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml index e012a82b15927..7c073739f6a1f 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml @@ -976,3 +976,140 @@ teardown: } - match: { error.root_cause.0.type: "illegal_argument_exception" } - match: { error.root_cause.0.reason: "Pipeline processor configured for non-existent pipeline [____pipeline_doesnot_exist___]" } + +--- +"Test simulate with docs containing metadata fields": + - do: + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "field": "field2", + "value": "foo" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_routing": "foo", + "_version": 100, + "_if_seq_no": 12333333333333333, + "_if_primary_term": 1, + "_source": { + "foo": "bar" + } + } + ] + } + + - length: { docs: 1 } + - match: { docs.0.doc._index: "index" } + - match: { docs.0.doc._id: "id" } + - match: { docs.0.doc._routing: "foo" } + - match: { docs.0.doc._version: "100" } + - match: { docs.0.doc._if_seq_no: "12333333333333333" } + - match: { docs.0.doc._if_primary_term: "1" } + - match: { docs.0.doc._source.foo: "bar" } + + - do: + catch: bad_request + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "field2", + "value": "foo" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_routing": "foo", + "_version": "bar", + "_source": { + "foo": "bar" + } + } + ] + } + - match: { status: 400 } + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Failed to parse parameter [_version], only int or long is accepted" } + + - do: + catch: bad_request + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "field2", + "value": "foo" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_routing": "foo", + "_if_seq_no": "123", + "_source": { + "foo": "bar" + } + } + ] + } + - match: { status: 400 } + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Failed to parse parameter [_if_seq_no], only int or long is accepted" } + + - do: + catch: bad_request + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "set" : { + "field" : "field2", + "value": "foo" + } + } + ] + }, + "docs": [ + { + "_index": "index", + "_id": "id", + "_routing": "foo", + "_if_primary_term": "1", + "_source": { + "foo": "bar" + } + } + ] + } + - match: { status: 400 } + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Failed to parse parameter [_if_primary_term], only 
int or long is accepted" } diff --git a/modules/lang-expression/licenses/asm-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-9.5.jar.sha1 deleted file mode 100644 index ea4aa3581dc87..0000000000000 --- a/modules/lang-expression/licenses/asm-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc6ea1875f4d64fbc85e1691c95b96a3d8569c90 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-9.6.jar.sha1 new file mode 100644 index 0000000000000..2d9e6a9d3cfd6 --- /dev/null +++ b/modules/lang-expression/licenses/asm-9.6.jar.sha1 @@ -0,0 +1 @@ +aa205cf0a06dbd8e04ece91c0b37c3f5d567546a \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 deleted file mode 100644 index 5be792660c19f..0000000000000 --- a/modules/lang-expression/licenses/asm-commons-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19ab5b5800a3910d30d3a3e64fdb00fd0cb42de0 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 new file mode 100644 index 0000000000000..a0814f495771f --- /dev/null +++ b/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 @@ -0,0 +1 @@ +f1a9e5508eff490744144565c47326c8648be309 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 deleted file mode 100644 index fb42db6a9d15c..0000000000000 --- a/modules/lang-expression/licenses/asm-tree-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd33c8b6373abaa675be407082fdfda35021254a \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 new file mode 100644 index 0000000000000..101eb03b4b736 --- /dev/null +++ b/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 @@ -0,0 +1 @@ +c0cdda9d211e965d2a4448aa3fd86110f2f8c2de \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.7.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.7.0.jar.sha1 deleted file mode 100644 index ecf696b4b3b83..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -297e1cfade4ef71466cc9d4f361d81807c8dc4c8 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..892865a017f48 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 @@ -0,0 +1 @@ +7725476acfcb9bdfeff1b813ce15c39c6b857dc2 \ No newline at end of file diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java index ed18b9ba6eb71..4ae3be72a8ab3 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/MoreExpressionIT.java @@ -32,12 +32,16 @@ package org.opensearch.script.expression; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchPhaseExecutionException; import 
org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; import org.opensearch.action.update.UpdateRequestBuilder; import org.opensearch.common.lucene.search.function.CombineFunction; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; @@ -53,9 +57,10 @@ import org.opensearch.search.aggregations.pipeline.SimpleValue; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -64,6 +69,7 @@ import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.PipelineAggregatorBuilders.bucketScript; @@ -74,7 +80,24 @@ import static org.hamcrest.Matchers.notNullValue; // TODO: please convert to unit tests! -public class MoreExpressionIT extends OpenSearchIntegTestCase { +public class MoreExpressionIT extends ParameterizedOpenSearchIntegTestCase { + + public MoreExpressionIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { @@ -481,6 +504,10 @@ public void testInvalidFieldMember() { } public void testSpecialValueVariable() throws Exception { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10079", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); // i.e. 
_value for aggregations createIndex("test"); ensureGreen("test"); diff --git a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java index f372e6a1ca2bc..f3c994521692c 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/opensearch/script/expression/StoredExpressionIT.java @@ -32,7 +32,10 @@ package org.opensearch.script.expression; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.plugins.Plugin; @@ -40,16 +43,36 @@ import org.opensearch.script.ScriptType; import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.containsString; //TODO: please convert to unit tests! -public class StoredExpressionIT extends OpenSearchIntegTestCase { +public class StoredExpressionIT extends ParameterizedOpenSearchIntegTestCase { + + public StoredExpressionIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); diff --git a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java index 035d2402857e0..11b2e20eea523 100644 --- a/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/opensearch/script/expression/ExpressionScriptEngine.java @@ -73,7 +73,7 @@ /** * Provides the infrastructure for Lucene expressions as a scripting language for OpenSearch. - * + *
<p>
    * Only contexts returning numeric types or {@link Object} are supported. */ public class ExpressionScriptEngine implements ScriptEngine { diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java index ff2be954a0878..204082ba10ded 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/opensearch/script/mustache/MultiSearchTemplateIT.java @@ -32,12 +32,16 @@ package org.opensearch.script.mustache; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchRequest; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexNotFoundException; import org.opensearch.plugins.Plugin; import org.opensearch.script.ScriptType; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; @@ -46,6 +50,7 @@ import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; @@ -53,7 +58,24 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.core.Is.is; -public class MultiSearchTemplateIT extends OpenSearchIntegTestCase { +public class MultiSearchTemplateIT extends ParameterizedOpenSearchIntegTestCase { + + public MultiSearchTemplateIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java index f4d7198dc2124..ec84475b70bb6 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MustacheScriptEngine.java @@ -59,7 +59,7 @@ /** * Main entry point handling template registration, compilation and * execution. - * + *
<p>
    * Template handling is based on Mustache. Template handling is a two step * process: First compile the string representing the template, the resulting * {@link Mustache} object can then be re-used for subsequent executions. diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheScriptEngineTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheScriptEngineTests.java index 9e97863306148..fbb7d09709a91 100644 --- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheScriptEngineTests.java +++ b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/MustacheScriptEngineTests.java @@ -200,7 +200,7 @@ private String getChars() { /** * From https://www.ietf.org/rfc/rfc4627.txt: - * + *
<p>
    * All Unicode characters may be placed within the * quotation marks except for the characters that must be escaped: * quotation mark, reverse solidus, and the control characters (U+0000 diff --git a/modules/lang-painless/licenses/asm-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-9.5.jar.sha1 deleted file mode 100644 index ea4aa3581dc87..0000000000000 --- a/modules/lang-painless/licenses/asm-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc6ea1875f4d64fbc85e1691c95b96a3d8569c90 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-9.6.jar.sha1 new file mode 100644 index 0000000000000..2d9e6a9d3cfd6 --- /dev/null +++ b/modules/lang-painless/licenses/asm-9.6.jar.sha1 @@ -0,0 +1 @@ +aa205cf0a06dbd8e04ece91c0b37c3f5d567546a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 deleted file mode 100644 index 9e87d3ce7d719..0000000000000 --- a/modules/lang-painless/licenses/asm-analysis-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -490bacc77de7cbc0be1a30bb3471072d705be4a4 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 new file mode 100644 index 0000000000000..fa42ea1198165 --- /dev/null +++ b/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 @@ -0,0 +1 @@ +9ce6c7b174bd997fc2552dff47964546bd7a5ec3 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 deleted file mode 100644 index 5be792660c19f..0000000000000 --- a/modules/lang-painless/licenses/asm-commons-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19ab5b5800a3910d30d3a3e64fdb00fd0cb42de0 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 new file mode 100644 index 0000000000000..a0814f495771f --- /dev/null +++ b/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 @@ -0,0 +1 @@ +f1a9e5508eff490744144565c47326c8648be309 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 deleted file mode 100644 index fb42db6a9d15c..0000000000000 --- a/modules/lang-painless/licenses/asm-tree-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd33c8b6373abaa675be407082fdfda35021254a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 new file mode 100644 index 0000000000000..101eb03b4b736 --- /dev/null +++ b/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 @@ -0,0 +1 @@ +c0cdda9d211e965d2a4448aa3fd86110f2f8c2de \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 deleted file mode 100644 index 5fffbfe655deb..0000000000000 --- a/modules/lang-painless/licenses/asm-util-9.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -64b5a1fc8c1b15ed2efd6a063e976bc8d3dc5ffe \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 new file mode 100644 index 0000000000000..1f42ac62dc69c --- /dev/null +++ b/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 @@ -0,0 +1 @@ +f77caf84eb93786a749b2baa40865b9613e3eaee \ No newline at end 
of file diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/Whitelist.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/Whitelist.java index b400c7a027fca..57ca2b9c6b5b6 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/Whitelist.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/Whitelist.java @@ -42,7 +42,7 @@ * Allowlist contains data structures designed to be used to generate an allowlist of Java classes, * constructors, methods, and fields that can be used within a Painless script at both compile-time * and run-time. - * + *
<p>
    * A Allowlist consists of several pieces with {@link WhitelistClass}s as the top level. Each * {@link WhitelistClass} will contain zero-to-many {@link WhitelistConstructor}s, {@link WhitelistMethod}s, and * {@link WhitelistField}s which are what will be available with a Painless script. See each individual diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/WhitelistClass.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/WhitelistClass.java index bf5083998f94b..d86d0754e8f02 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/WhitelistClass.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/WhitelistClass.java @@ -45,12 +45,12 @@ * classes. Though, since multiple allowlists may be combined into a single allowlist for a * specific context, as long as multiple classes representing the same Java class have the same * class name and have legal constructor/method overloading they can be merged together. - * + *
<p>
    * Classes in Painless allow for arity overloading for constructors and methods. Arity overloading * means that multiple constructors are allowed for a single class as long as they have a different * number of parameters, and multiples methods with the same name are allowed for a single class * as long as they have the same return type and a different number of parameters. - * + *
<p>
    * Classes will automatically extend other allowlisted classes if the Java class they represent is a * subclass of other classes including Java interfaces. */ diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/WhitelistLoader.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/WhitelistLoader.java index 2da6d8fce1d8e..606a6c1facce0 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/WhitelistLoader.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/WhitelistLoader.java @@ -67,11 +67,11 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep * is the path of a single text file. The {@link Class}'s {@link ClassLoader} will be used to lookup the Java * reflection objects for each individual {@link Class}, {@link Constructor}, {@link Method}, and {@link Field} * specified as part of the allowlist in the text file. - * + *
<p>
    * A single pass is made through each file to collect all the information about each class, constructor, method, * and field. Most validation will be done at a later point after all allowlists have been gathered and their * merging takes place. - * + *
<p>
    * A painless type name is one of the following: *
<ul>
      *
    • def - The Painless dynamic type which is automatically included without a need to be @@ -129,13 +129,13 @@ public static Whitelist loadFromResourceFiles(Class resource, String... filep * be appropriately parsed and handled. Painless complex types must be specified with the * fully-qualified Java class name. Method argument types, method return types, and field types * must be specified with Painless type names (def, fully-qualified, or short) as described earlier. - * + *
<p>
      * The following example is used to create a single allowlist text file: * - * {@code + *
<pre>
            * # primitive types
            *
      -     * class int -> int {
+     * class int -&gt; int {
            * }
            *
            * # complex types
@@ -161,7 +161,7 @@ public static Whitelist loadFromResourceFiles(Class<?> resource, String... filep
            *   int value1
            *   def value2
            * }
      -     * }
+     * </pre>
      */ public static Whitelist loadFromResourceFiles(Class resource, Map parsers, String... filepaths) { List allowlistClasses = new ArrayList<>(); diff --git a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/WhitelistMethod.java b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/WhitelistMethod.java index 9a57a5a098c19..3b9092ae6a119 100644 --- a/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/WhitelistMethod.java +++ b/modules/lang-painless/spi/src/main/java/org/opensearch/painless/spi/WhitelistMethod.java @@ -45,7 +45,7 @@ * are using the '.' operator on an existing class variable/field. Painless classes may have multiple * methods with the same name as long as they comply with arity overloading described in * {@link WhitelistClass}. - * + *
<p>
      * Classes may also have additional methods that are not part of the Java class the class represents - * these are known as augmented methods. An augmented method can be added to a class as a part of any * Java class as long as the method is static and the first parameter of the method is the Java class diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java index 1f57c5cbd1149..35b53e16cc3fa 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/Compiler.java @@ -73,9 +73,7 @@ final class Compiler { */ private static final CodeSource CODESOURCE; - /** - * Setup the code privileges. - */ + /* Setup the code privileges. */ static { try { // Setup the code privileges. diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupUtility.java b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupUtility.java index cae425ad1fe3b..3164f5e6388c7 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupUtility.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/lookup/PainlessLookupUtility.java @@ -42,13 +42,13 @@ /** * PainlessLookupUtility contains methods shared by {@link PainlessLookupBuilder}, {@link PainlessLookup}, and other classes within * Painless for conversion between type names and types along with some other various utility methods. - * + *
<p>
      * The following terminology is used for variable names throughout the lookup package: - * + *
<p>
      * A class is a set of methods and fields under a specific class name. A type is either a class or an array under a specific type name. * Note the distinction between class versus type is class means that no array classes will be be represented whereas type allows array * classes to be represented. The set of available classes will always be a subset of the available types. - * + *
<p>
      * Under ambiguous circumstances most variable names are prefixed with asm, java, or painless. If the variable value is the same for asm, * java, and painless, no prefix is used. Target is used as a prefix to represent if a constructor, method, or field is being * called/accessed on that specific class. Parameter is often a postfix used to represent if a type is used as a parameter to a diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/phase/PainlessSemanticAnalysisPhase.java b/modules/lang-painless/src/main/java/org/opensearch/painless/phase/PainlessSemanticAnalysisPhase.java index 04165f44ba212..8a05d6742af97 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/phase/PainlessSemanticAnalysisPhase.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/phase/PainlessSemanticAnalysisPhase.java @@ -126,9 +126,9 @@ public void visitFunction(SFunction userFunctionNode, ScriptScope scriptScope) { /** * Visits an expression that is also considered a statement. - * + *
<p>
      * If the statement is a return from the execute method, performs return value conversion. - * + *
<p>
      * Checks: control flow, type validation */ @Override @@ -168,9 +168,9 @@ public void visitExpression(SExpression userExpressionNode, SemanticScope semant /** * Visits a return statement and casts the value to the return type if possible. - * + *
<p>
      * If the statement is a return from the execute method, performs return value conversion. - * + *
<p>
      * Checks: type validation */ @Override diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/symbol/SemanticScope.java b/modules/lang-painless/src/main/java/org/opensearch/painless/symbol/SemanticScope.java index e27530d745e8f..5ac802038afa6 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/symbol/SemanticScope.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/symbol/SemanticScope.java @@ -49,7 +49,7 @@ * Tracks information within a scope required for compilation during the * semantic phase in the user tree. There are three types of scopes - * {@link FunctionScope}, {@link LambdaScope}, and {@link BlockScope}. - * + *
<p>
      * Scopes are stacked as they are created during the user tree's semantic * phase with each scope beyond the top-level containing a reference to * its parent. As a scope is no longer necessary, it's dropped automatically diff --git a/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.score.txt b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.score.txt index 24649ca78b354..b9268421c7ef3 100644 --- a/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.score.txt +++ b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.score.txt @@ -24,7 +24,6 @@ class org.opensearch.script.ScoreScript @no_import { static_import { int termFreq(org.opensearch.script.ScoreScript, String, String) bound_to org.opensearch.script.ScoreScriptUtils$TermFreq - float tf(org.opensearch.script.ScoreScript, String, String) bound_to org.opensearch.script.ScoreScriptUtils$TF long totalTermFreq(org.opensearch.script.ScoreScript, String, String) bound_to org.opensearch.script.ScoreScriptUtils$TotalTermFreq long sumTotalTermFreq(org.opensearch.script.ScoreScript, String) bound_to org.opensearch.script.ScoreScriptUtils$SumTotalTermFreq double saturation(double, double) from_class org.opensearch.script.ScoreScriptUtils diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java index 397a7b48b472a..366e848416328 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java @@ -82,7 +82,7 @@ /** * Mapper for a text field that optimizes itself for as-you-type completion by indexing its content into subfields. Each subfield * modifies the analysis chain of the root field to index terms the user would create as they type out the value in the root field - * + *
<p>
      * The structure of these fields is * *

      diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/AbstractParentChildTestCase.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/AbstractParentChildTestCase.java
      index dac1b313777a6..e049edf843069 100644
      --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/AbstractParentChildTestCase.java
      +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/AbstractParentChildTestCase.java
      @@ -33,6 +33,11 @@
       package org.opensearch.join.aggregations;
       
       import org.opensearch.action.index.IndexRequestBuilder;
      +import org.opensearch.action.search.SearchRequestBuilder;
      +import org.opensearch.action.search.SearchResponse;
      +import org.opensearch.client.Requests;
      +import org.opensearch.cluster.metadata.IndexMetadata;
      +import org.opensearch.common.settings.Settings;
       import org.opensearch.join.query.ParentChildTestCase;
       import org.junit.Before;
       
      @@ -44,6 +49,7 @@
       import java.util.Set;
       
       import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
      +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;
       
       /**
        * Small base test-class which combines stuff used for Children and Parent aggregation tests
      @@ -52,6 +58,10 @@ public abstract class AbstractParentChildTestCase extends ParentChildTestCase {
    protected final Map<String, Control> categoryToControl = new HashMap<>();
    protected final Map<String, ParentControl> articleToControl = new HashMap<>();
       
      +    public AbstractParentChildTestCase(Settings dynamicSettings) {
      +        super(dynamicSettings);
      +    }
      +
           @Before
           public void setupCluster() throws Exception {
               assertAcked(
      @@ -154,4 +164,38 @@ private ParentControl(String category) {
                   this.category = category;
               }
           }
      +
      +    // Test when there is 1 child document and 1 parent document per segment.
      +    public void testSparseSegments() throws InterruptedException {
      +        assertAcked(
      +            prepareCreate("sparse").setMapping(
      +                addFieldMappings(
      +                    buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "article", "comment"),
      +                    "commenter",
      +                    "keyword",
      +                    "category",
      +                    "keyword"
      +                )
      +            )
      +                .setSettings(
      +                    Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
      +                )
      +        );
      +
+        List<IndexRequestBuilder> requests = new ArrayList<>();
      +        requests.add(createIndexRequest("sparse", "article", "article-0", null, "category", List.of("0")));
      +        indexRandom(true, false, requests);
      +        client().admin().indices().refresh(Requests.refreshRequest("sparse")).actionGet();
      +        requests = new ArrayList<>();
      +        requests.add(createIndexRequest("sparse", "comment", "comment-0", "article-0", "commenter", "0"));
      +        indexRandom(true, false, requests);
      +
      +        SearchResponse searchResponse = getSearchRequest().get();
      +        assertSearchResponse(searchResponse);
      +        validateSpareSegmentsSearchResponse(searchResponse);
      +    }
      +
      +    abstract SearchRequestBuilder getSearchRequest();
      +
      +    abstract void validateSpareSegmentsSearchResponse(SearchResponse searchResponse);
       }
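Reviewer note: the new testSparseSegments is a template method, so each concrete suite only supplies a query and an assertion via the two abstract hooks added above. A minimal conforming subclass could look like the sketch below; ExampleSparseIT and its assertion are hypothetical, and the real wirings are in the ChildrenIT and ParentIT diffs that follow.

import org.opensearch.action.search.SearchRequestBuilder;
import org.opensearch.action.search.SearchResponse;
import org.opensearch.common.settings.Settings;
import org.opensearch.index.query.QueryBuilders;

// Hypothetical subclass sketch: wires the two hooks added above.
public class ExampleSparseIT extends AbstractParentChildTestCase {

    public ExampleSparseIT(Settings dynamicSettings) {
        super(dynamicSettings);
    }

    @Override
    SearchRequestBuilder getSearchRequest() {
        // Query the single-shard "sparse" index populated by testSparseSegments().
        return client().prepareSearch("sparse").setQuery(QueryBuilders.matchAllQuery());
    }

    @Override
    void validateSpareSegmentsSearchResponse(SearchResponse searchResponse) {
        // One article and one comment are indexed, each flushed into its own
        // segment, so a match_all sees exactly two documents.
        assertEquals(2, searchResponse.getHits().getTotalHits().value);
    }
}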
      diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java
      index 72c502c616ff8..5fc0a202ae45e 100644
      --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java
      +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ChildrenIT.java
      @@ -31,13 +31,17 @@
       
       package org.opensearch.join.aggregations;
       
      +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
      +
       import org.apache.lucene.search.join.ScoreMode;
       import org.opensearch.action.index.IndexRequestBuilder;
      +import org.opensearch.action.search.SearchRequestBuilder;
       import org.opensearch.action.search.SearchResponse;
       import org.opensearch.action.update.UpdateResponse;
       import org.opensearch.client.Requests;
       import org.opensearch.cluster.metadata.IndexMetadata;
       import org.opensearch.common.settings.Settings;
      +import org.opensearch.common.util.FeatureFlags;
       import org.opensearch.search.SearchHit;
       import org.opensearch.search.aggregations.AggregationBuilders;
       import org.opensearch.search.aggregations.InternalAggregation;
      @@ -47,14 +51,18 @@
       import org.opensearch.search.sort.SortOrder;
       
       import java.util.ArrayList;
      +import java.util.Arrays;
      +import java.util.Collection;
       import java.util.List;
       import java.util.Map;
       import java.util.Set;
       
      +import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
       import static org.opensearch.index.query.QueryBuilders.matchQuery;
       import static org.opensearch.index.query.QueryBuilders.termQuery;
       import static org.opensearch.join.aggregations.JoinAggregationBuilders.children;
       import static org.opensearch.join.query.JoinQueryBuilders.hasChildQuery;
      +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
       import static org.opensearch.search.aggregations.AggregationBuilders.sum;
       import static org.opensearch.search.aggregations.AggregationBuilders.terms;
       import static org.opensearch.search.aggregations.AggregationBuilders.topHits;
      @@ -69,6 +77,23 @@
       
       public class ChildrenIT extends AbstractParentChildTestCase {
       
      +    public ChildrenIT(Settings settings) {
      +        super(settings);
      +    }
      +
      +    @ParametersFactory
+    public static Collection<Object[]> parameters() {
      +        return Arrays.asList(
      +            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
      +            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
      +        );
      +    }
      +
      +    @Override
      +    protected Settings featureFlagSettings() {
      +        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
      +    }
      +
           public void testChildrenAggs() throws Exception {
               SearchResponse searchResponse = client().prepareSearch("test")
                   .setQuery(matchQuery("randomized", true))
      @@ -407,4 +432,18 @@ public void testPostCollectAllLeafReaders() throws Exception {
               children = parents.getBuckets().get(0).getAggregations().get("child_docs");
               assertThat(children.getDocCount(), equalTo(2L));
           }
      +
      +    @Override
      +    SearchRequestBuilder getSearchRequest() {
      +        return client().prepareSearch("sparse")
      +            .setSize(10000)
      +            .setQuery(matchAllQuery())
      +            .addAggregation(children("to_comment", "comment").subAggregation(terms("commenters").field("commenter").size(10000)));
      +    }
      +
      +    @Override
      +    void validateSpareSegmentsSearchResponse(SearchResponse searchResponse) {
      +        Children children = searchResponse.getAggregations().get("to_comment");
      +        assertEquals(children.getDocCount(), 1);
      +    }
       }
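Reviewer note: the @ParametersFactory boilerplate added here, and repeated in the other suites in this PR, runs every test twice, once per value of search.concurrent_segment_search.enabled. Outside the test harness the same switch is an ordinary dynamic cluster setting; the sketch below shows one way to flip it, assuming an integ-test-style client() is in scope. It is illustrative, not part of this change.

import org.opensearch.common.settings.Settings;

// Sketch: enable concurrent segment search cluster-wide, mirroring the
// second parameter set above. client() is assumed to be available.
client().admin()
    .cluster()
    .prepareUpdateSettings()
    .setPersistentSettings(Settings.builder().put("search.concurrent_segment_search.enabled", true))
    .get();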
      diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java
      index 351b0beec481b..04703a65aa19d 100644
      --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java
      +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/aggregations/ParentIT.java
      @@ -32,12 +32,18 @@
       
       package org.opensearch.join.aggregations;
       
      +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
      +
       import org.opensearch.action.search.SearchRequestBuilder;
       import org.opensearch.action.search.SearchResponse;
      +import org.opensearch.common.settings.Settings;
      +import org.opensearch.common.util.FeatureFlags;
       import org.opensearch.search.aggregations.Aggregation;
       import org.opensearch.search.aggregations.bucket.MultiBucketsAggregation;
       import org.opensearch.search.aggregations.bucket.terms.Terms;
       
      +import java.util.Arrays;
      +import java.util.Collection;
       import java.util.HashMap;
       import java.util.HashSet;
       import java.util.List;
      @@ -47,8 +53,10 @@
       import java.util.stream.Collectors;
       import java.util.stream.Stream;
       
      +import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
       import static org.opensearch.index.query.QueryBuilders.matchQuery;
       import static org.opensearch.join.aggregations.JoinAggregationBuilders.parent;
      +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
       import static org.opensearch.search.aggregations.AggregationBuilders.terms;
       import static org.opensearch.search.aggregations.AggregationBuilders.topHits;
       import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;
      @@ -56,6 +64,23 @@
       
       public class ParentIT extends AbstractParentChildTestCase {
       
      +    public ParentIT(Settings settings) {
      +        super(settings);
      +    }
      +
      +    @ParametersFactory
+    public static Collection<Object[]> parameters() {
      +        return Arrays.asList(
      +            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
      +            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
      +        );
      +    }
      +
      +    @Override
      +    protected Settings featureFlagSettings() {
      +        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
      +    }
      +
           public void testSimpleParentAgg() throws Exception {
               final SearchRequestBuilder searchRequest = client().prepareSearch("test")
                   .setSize(10000)
      @@ -264,4 +289,18 @@ public void testTermsParentAggTerms() throws Exception {
                   }
               }
           }
      +
      +    @Override
      +    SearchRequestBuilder getSearchRequest() {
      +        return client().prepareSearch("sparse")
      +            .setSize(10000)
      +            .setQuery(matchAllQuery())
      +            .addAggregation(parent("to_article", "comment").subAggregation(terms("category").field("category").size(10000)));
      +    }
      +
      +    @Override
      +    void validateSpareSegmentsSearchResponse(SearchResponse searchResponse) {
      +        Parent parentAgg = searchResponse.getAggregations().get("to_article");
      +        assertEquals(parentAgg.getDocCount(), 1);
      +    }
       }
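Reviewer note: both sparse-segment overrides, here and in ChildrenIT, query the shared "sparse" index whose mapping is produced by buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "article", "comment"). For readers outside this module, the mapping shape that implies is roughly the sketch below; the helper name sparseIndexMapping is illustrative, not part of the PR.

import java.io.IOException;

import org.opensearch.common.xcontent.XContentFactory;
import org.opensearch.core.xcontent.XContentBuilder;

// Sketch of the parent-join mapping the sparse tests rely on: a single join
// field in which "article" documents are parents of "comment" documents.
static XContentBuilder sparseIndexMapping() throws IOException {
    return XContentFactory.jsonBuilder()
        .startObject()
        .startObject("properties")
        .startObject("join_field")
        .field("type", "join")
        .startObject("relations")
        .field("article", "comment") // parent -> child
        .endObject()
        .endObject()
        .startObject("commenter")
        .field("type", "keyword")
        .endObject()
        .startObject("category")
        .field("type", "keyword")
        .endObject()
        .endObject()
        .endObject();
}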
      diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java
      index 6a35496946a22..f7b8b47d2f3a2 100644
      --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java
      +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java
      @@ -31,6 +31,8 @@
       
       package org.opensearch.join.query;
       
      +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
      +
       import org.apache.lucene.search.join.ScoreMode;
       import org.opensearch.action.explain.ExplainResponse;
       import org.opensearch.action.index.IndexRequestBuilder;
      @@ -42,6 +44,7 @@
       import org.opensearch.common.lucene.search.function.FunctionScoreQuery;
       import org.opensearch.common.settings.Settings;
       import org.opensearch.common.unit.TimeValue;
      +import org.opensearch.common.util.FeatureFlags;
       import org.opensearch.core.rest.RestStatus;
       import org.opensearch.index.query.BoolQueryBuilder;
       import org.opensearch.index.query.IdsQueryBuilder;
      @@ -65,6 +68,8 @@
       
       import java.io.IOException;
       import java.util.ArrayList;
      +import java.util.Arrays;
      +import java.util.Collection;
       import java.util.HashMap;
       import java.util.HashSet;
       import java.util.List;
      @@ -87,6 +92,7 @@
       import static org.opensearch.join.query.JoinQueryBuilders.hasChildQuery;
       import static org.opensearch.join.query.JoinQueryBuilders.hasParentQuery;
       import static org.opensearch.join.query.JoinQueryBuilders.parentId;
      +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
       import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
       import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
       import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures;
      @@ -100,6 +106,23 @@
       
       public class ChildQuerySearchIT extends ParentChildTestCase {
       
      +    public ChildQuerySearchIT(Settings settings) {
      +        super(settings);
      +    }
      +
      +    @ParametersFactory
+    public static Collection<Object[]> parameters() {
      +        return Arrays.asList(
      +            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
      +            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
      +        );
      +    }
      +
      +    @Override
      +    protected Settings featureFlagSettings() {
      +        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
      +    }
      +
           public void testMultiLevelChild() throws Exception {
               assertAcked(
                   prepareCreate("test").setMapping(
      diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java
      index ffcc9cf38545f..39da86c7fd726 100644
      --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java
      +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/InnerHitsIT.java
      @@ -32,11 +32,15 @@
       
       package org.opensearch.join.query;
       
      +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
      +
       import org.apache.lucene.search.join.ScoreMode;
       import org.apache.lucene.util.ArrayUtil;
       import org.opensearch.action.index.IndexRequestBuilder;
       import org.opensearch.action.search.SearchPhaseExecutionException;
       import org.opensearch.action.search.SearchResponse;
      +import org.opensearch.common.settings.Settings;
      +import org.opensearch.common.util.FeatureFlags;
       import org.opensearch.index.IndexSettings;
       import org.opensearch.index.query.BoolQueryBuilder;
       import org.opensearch.index.query.InnerHitBuilder;
      @@ -54,6 +58,7 @@
       import org.opensearch.search.sort.SortOrder;
       
       import java.util.ArrayList;
      +import java.util.Arrays;
       import java.util.Collection;
       import java.util.Collections;
       import java.util.List;
      @@ -73,6 +78,7 @@
       import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
       import static org.opensearch.join.query.JoinQueryBuilders.hasChildQuery;
       import static org.opensearch.join.query.JoinQueryBuilders.hasParentQuery;
      +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
       import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
       import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
       import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures;
      @@ -87,6 +93,23 @@
       
       public class InnerHitsIT extends ParentChildTestCase {
       
      +    public InnerHitsIT(Settings settings) {
      +        super(settings);
      +    }
      +
      +    @ParametersFactory
+    public static Collection<Object[]> parameters() {
      +        return Arrays.asList(
      +            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
      +            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
      +        );
      +    }
      +
      +    @Override
      +    protected Settings featureFlagSettings() {
      +        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
      +    }
      +
           @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        ArrayList<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins());
      diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java
      index 775f9672e9c47..9b87b340eb0a8 100644
      --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java
      +++ b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ParentChildTestCase.java
      @@ -41,6 +41,7 @@
       import org.opensearch.plugins.Plugin;
       import org.opensearch.test.InternalSettingsPlugin;
       import org.opensearch.test.OpenSearchIntegTestCase;
      +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
       
       import java.io.IOException;
       import java.util.Arrays;
      @@ -50,7 +51,11 @@
       import java.util.Map;
       
       @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE)
      -public abstract class ParentChildTestCase extends OpenSearchIntegTestCase {
      +public abstract class ParentChildTestCase extends ParameterizedOpenSearchIntegTestCase {
      +
      +    public ParentChildTestCase(Settings dynamicSettings) {
      +        super(dynamicSettings);
      +    }
       
           @Override
           protected boolean ignoreExternalCluster() {
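Reviewer note: ParentChildTestCase is now the funnel through which every parent-join IT passes its per-run Settings into ParameterizedOpenSearchIntegTestCase. Conceptually, the randomized runner does something like the loop below for each entry a @ParametersFactory returns; this is an illustration of the flow, not the actual runner internals.

import org.opensearch.common.settings.Settings;

// Illustration only: one suite instance per parameter set. The Settings handed
// to the constructor are applied to the cluster as dynamic settings before the
// suite's tests run, which is how each IT executes with the feature off and on.
for (Object[] parameterSet : ChildrenIT.parameters()) {
    Settings dynamicSettings = (Settings) parameterSet[0];
    // runner: new ChildrenIT(dynamicSettings) -> super(dynamicSettings) -> applied per run
}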
      diff --git a/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ChildrenAggregatorFactory.java b/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ChildrenAggregatorFactory.java
      index 793b35111cfe2..bbca89fc56820 100644
      --- a/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ChildrenAggregatorFactory.java
      +++ b/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ChildrenAggregatorFactory.java
      @@ -118,4 +118,9 @@ public String getStatsSubtype() {
               // Child Aggregation is registered in non-standard way, so it might return child's values type
               return OTHER_SUBTYPE;
           }
      +
      +    @Override
      +    protected boolean supportsConcurrentSegmentSearch() {
      +        return true;
      +    }
       }
      diff --git a/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ParentAggregatorFactory.java b/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ParentAggregatorFactory.java
      index 40c07c8f53e20..9a21cd1db3200 100644
      --- a/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ParentAggregatorFactory.java
      +++ b/modules/parent-join/src/main/java/org/opensearch/join/aggregations/ParentAggregatorFactory.java
      @@ -118,4 +118,10 @@ public String getStatsSubtype() {
               // Parent Aggregation is registered in non-standard way
               return OTHER_SUBTYPE;
           }
      +
      +    @Override
      +    protected boolean supportsConcurrentSegmentSearch() {
      +        // See https://github.com/opensearch-project/OpenSearch/issues/9316
      +        return false;
      +    }
       }
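Reviewer note: these two overrides are the behavioral heart of this module's change. Children aggregations opt in to concurrent segment search, while parent aggregations stay on the sequential path until issue 9316 is resolved. The hook acts as a per-aggregation veto; the self-contained illustration below captures that decision logic with hypothetical names, not the actual OpenSearch internals.

import java.util.List;

// Hypothetical illustration: the concurrent path is taken only when the
// cluster setting is enabled AND every aggregation in the request opts in.
interface ConcurrentSearchCapable {
    boolean supportsConcurrentSegmentSearch();
}

final class ConcurrentSearchDecision {
    static boolean useConcurrentPath(boolean settingEnabled, List<ConcurrentSearchCapable> aggs) {
        // A single opt-out (e.g. the parent aggregation above) forces the sequential path.
        return settingEnabled && aggs.stream().allMatch(ConcurrentSearchCapable::supportsConcurrentSegmentSearch);
    }
}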
      diff --git a/modules/parent-join/src/main/java/org/opensearch/join/query/HasChildQueryBuilder.java b/modules/parent-join/src/main/java/org/opensearch/join/query/HasChildQueryBuilder.java
      index 1a51259e9e4e4..e930780613ed6 100644
      --- a/modules/parent-join/src/main/java/org/opensearch/join/query/HasChildQueryBuilder.java
      +++ b/modules/parent-join/src/main/java/org/opensearch/join/query/HasChildQueryBuilder.java
      @@ -373,7 +373,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException {
            * A query that rewrites into another query using
            * {@link JoinUtil#createJoinQuery(String, Query, Query, IndexSearcher, ScoreMode, OrdinalMap, int, int)}
            * that executes the actual join.
      -     *
+     * <p>
      * This query is exclusively used by the {@link HasChildQueryBuilder} and {@link HasParentQueryBuilder} to get access * to the {@link DirectoryReader} used by the current search in order to retrieve the {@link OrdinalMap}. * The {@link OrdinalMap} is required by {@link JoinUtil} to execute the join. diff --git a/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java b/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java index 821728bcda944..516d8c8581de7 100644 --- a/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java +++ b/modules/percolator/src/internalClusterTest/java/org/opensearch/percolator/PercolatorQuerySearchIT.java @@ -31,6 +31,8 @@ package org.opensearch.percolator; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.join.ScoreMode; import org.opensearch.OpenSearchException; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; @@ -39,6 +41,7 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.DistanceUnit; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.bytes.BytesArray; @@ -54,7 +57,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -77,6 +80,7 @@ import static org.opensearch.index.query.QueryBuilders.spanNotQuery; import static org.opensearch.index.query.QueryBuilders.spanTermQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchHits; @@ -86,7 +90,24 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsNull.notNullValue; -public class PercolatorQuerySearchIT extends OpenSearchIntegTestCase { +public class PercolatorQuerySearchIT extends ParameterizedOpenSearchIntegTestCase { + + public PercolatorQuerySearchIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected boolean addMockGeoShapeFieldMapper() { diff --git a/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java index ea80b59711b8a..5b6dcbe482d04 100644 --- 
a/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/internalClusterTest/java/org/opensearch/index/rankeval/RankEvalRequestIT.java @@ -32,10 +32,14 @@ package org.opensearch.index.rankeval; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -43,7 +47,7 @@ import org.opensearch.indices.IndexClosedException; import org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.junit.Before; import java.util.ArrayList; @@ -54,15 +58,33 @@ import java.util.Set; import static org.opensearch.index.rankeval.EvaluationMetric.filterUnratedDocuments; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.instanceOf; -public class RankEvalRequestIT extends OpenSearchIntegTestCase { +public class RankEvalRequestIT extends ParameterizedOpenSearchIntegTestCase { private static final String TEST_INDEX = "test"; private static final String INDEX_ALIAS = "alias0"; private static final int RELEVANT_RATING_1 = 1; + public RankEvalRequestIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Collection> nodePlugins() { return Arrays.asList(RankEvalPlugin.class); diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java index 1fe5a2840fae6..76197561fdb5e 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java @@ -71,10 +71,10 @@ * supplied query parameters) against a set of possible search requests (read: * search specifications, expressed as query/search request templates) and * compares the result against a set of annotated documents per search intent. - * + *
<p>
      * If any documents are returned that haven't been annotated the document id of * those is returned per search intent. - * + *
<p>
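One concrete reading of the precision-at-n metric described below, with numbers invented purely for illustration:

    // Precision at n = (relevant documents in the top n hits) / n.
    // E.g. n = 5 with relevant results at ranks 1, 3 and 4:
    double precisionAt5 = 3.0 / 5.0; // 0.6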
      * The resulting search quality is computed in terms of precision at n and * returned for each search specification for the full set of search intents as * averaged precision at n. diff --git a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java index fcf1fa489f740..d96e3212e05a2 100644 --- a/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/opensearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -67,7 +67,7 @@ public class DiscountedCumulativeGainTests extends OpenSearchTestCase { /** * Assuming the docs are ranked in the following order: - * + *
<p>
      * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) * ------------------------------------------------------------------------------------------- * 1 | 3 | 7.0 | 1.0 | 7.0 | 7.0 |  @@ -76,7 +76,7 @@ public class DiscountedCumulativeGainTests extends OpenSearchTestCase { * 4 | 0 | 0.0 | 2.321928094887362 | 0.0 * 5 | 1 | 1.0 | 2.584962500721156 | 0.38685280723454163 * 6 | 2 | 3.0 | 2.807354922057604 | 1.0686215613240666 - * + *
<p>
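As a sanity check, the dcg sum quoted just below can be reproduced in a few lines. This is a standalone sketch, not part of the diff; the two table rows elided by the hunk boundary above are inferred from the documented total:

    // dcg = sum over ranks of (2^relevance - 1) / log_2(rank + 1),
    // with the relevance column 3, 2, 3, 0, 1, 2 used by testDCGAt().
    public class DcgCheck {
        public static void main(String[] args) {
            int[] relevance = { 3, 2, 3, 0, 1, 2 };
            double dcg = 0;
            for (int rank = 1; rank <= relevance.length; rank++) {
                dcg += (Math.pow(2, relevance[rank - 1]) - 1) / (Math.log(rank + 1) / Math.log(2));
            }
            System.out.println(dcg); // ~13.84826362927298, matching the value quoted below
        }
    }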
      * dcg = 13.84826362927298 (sum of last column) */ public void testDCGAt() { @@ -91,20 +91,20 @@ public void testDCGAt() { DiscountedCumulativeGain dcg = new DiscountedCumulativeGain(); assertEquals(EXPECTED_DCG, dcg.evaluate("id", hits, rated).metricScore(), DELTA); - /** - * Check with normalization: to get the maximal possible dcg, sort documents by - * relevance in descending order - * - * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) - * --------------------------------------------------------------------------------------- - * 1 | 3 | 7.0 | 1.0  | 7.0 - * 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 - * 3 | 2 | 3.0 | 2.0  | 1.5 - * 4 | 2 | 3.0 | 2.321928094887362 | 1.2920296742201793 - * 5 | 1 | 1.0 | 2.584962500721156  | 0.38685280723454163 - * 6 | 0 | 0.0 | 2.807354922057604  | 0.0 - * - * idcg = 14.595390756454922 (sum of last column) + /* + Check with normalization: to get the maximal possible dcg, sort documents by + relevance in descending order + + rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) + --------------------------------------------------------------------------------------- + 1 | 3 | 7.0 | 1.0  | 7.0 + 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 + 3 | 2 | 3.0 | 2.0  | 1.5 + 4 | 2 | 3.0 | 2.321928094887362 | 1.2920296742201793 + 5 | 1 | 1.0 | 2.584962500721156  | 0.38685280723454163 + 6 | 0 | 0.0 | 2.807354922057604  | 0.0 + + idcg = 14.595390756454922 (sum of last column) */ dcg = new DiscountedCumulativeGain(true, null, 10); assertEquals(EXPECTED_NDCG, dcg.evaluate("id", hits, rated).metricScore(), DELTA); @@ -113,7 +113,7 @@ public void testDCGAt() { /** * This tests metric when some documents in the search result don't have a * rating provided by the user. - * + *
<p>
      * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) * ------------------------------------------------------------------------------------------- * 1 | 3 | 7.0 | 1.0 | 7.0 2 |  @@ -122,7 +122,7 @@ public void testDCGAt() { * 4 | n/a | n/a | n/a | n/a * 5 | 1 | 1.0 | 2.584962500721156 | 0.38685280723454163 * 6 | n/a | n/a | n/a | n/a - * + *
<p>
      * dcg = 12.779642067948913 (sum of last column) */ public void testDCGAtSixMissingRatings() { @@ -143,20 +143,20 @@ public void testDCGAtSixMissingRatings() { assertEquals(12.779642067948913, result.metricScore(), DELTA); assertEquals(2, filterUnratedDocuments(result.getHitsAndRatings()).size()); - /** - * Check with normalization: to get the maximal possible dcg, sort documents by - * relevance in descending order - * - * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) - * ---------------------------------------------------------------------------------------- - * 1 | 3 | 7.0 | 1.0  | 7.0 - * 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 - * 3 | 2 | 3.0 | 2.0  | 1.5 - * 4 | 1 | 1.0 | 2.321928094887362   | 0.43067655807339 - * 5 | n.a | n.a | n.a.  | n.a. - * 6 | n.a | n.a | n.a  | n.a - * - * idcg = 13.347184833073591 (sum of last column) + /* + Check with normalization: to get the maximal possible dcg, sort documents by + relevance in descending order + + rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) + ---------------------------------------------------------------------------------------- + 1 | 3 | 7.0 | 1.0  | 7.0 + 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 + 3 | 2 | 3.0 | 2.0  | 1.5 + 4 | 1 | 1.0 | 2.321928094887362   | 0.43067655807339 + 5 | n.a | n.a | n.a.  | n.a. + 6 | n.a | n.a | n.a  | n.a + + idcg = 13.347184833073591 (sum of last column) */ dcg = new DiscountedCumulativeGain(true, null, 10); assertEquals(12.779642067948913 / 13.347184833073591, dcg.evaluate("id", hits, rated).metricScore(), DELTA); @@ -166,7 +166,7 @@ public void testDCGAtSixMissingRatings() { * This tests that normalization works as expected when there are more rated * documents than search hits because we restrict DCG to be calculated at the * fourth position - * + *
<p>
      * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) * ------------------------------------------------------------------------------------------- * 1 | 3 | 7.0 | 1.0 | 7.0 2 |  @@ -176,7 +176,7 @@ public void testDCGAtSixMissingRatings() { * ----------------------------------------------------------------- * 5 | 1 | 1.0 | 2.584962500721156 | 0.38685280723454163 * 6 | n/a | n/a | n/a | n/a - * + *
<p>
      * dcg = 12.392789260714371 (sum of last column until position 4) */ public void testDCGAtFourMoreRatings() { @@ -200,21 +200,21 @@ public void testDCGAtFourMoreRatings() { assertEquals(12.392789260714371, result.metricScore(), DELTA); assertEquals(1, filterUnratedDocuments(result.getHitsAndRatings()).size()); - /** - * Check with normalization: to get the maximal possible dcg, sort documents by - * relevance in descending order - * - * rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) - * --------------------------------------------------------------------------------------- - * 1 | 3 | 7.0 | 1.0  | 7.0 - * 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 - * 3 | 2 | 3.0 | 2.0  | 1.5 - * 4 | 1 | 1.0 | 2.321928094887362   | 0.43067655807339 - * --------------------------------------------------------------------------------------- - * 5 | n.a | n.a | n.a.  | n.a. - * 6 | n.a | n.a | n.a  | n.a - * - * idcg = 13.347184833073591 (sum of last column) + /* + Check with normalization: to get the maximal possible dcg, sort documents by + relevance in descending order + + rank | relevance | 2^(relevance) - 1 | log_2(rank + 1) | (2^(relevance) - 1) / log_2(rank + 1) + --------------------------------------------------------------------------------------- + 1 | 3 | 7.0 | 1.0  | 7.0 + 2 | 3 | 7.0 | 1.5849625007211563 | 4.416508275000202 + 3 | 2 | 3.0 | 2.0  | 1.5 + 4 | 1 | 1.0 | 2.321928094887362   | 0.43067655807339 + --------------------------------------------------------------------------------------- + 5 | n.a | n.a | n.a.  | n.a. + 6 | n.a | n.a | n.a  | n.a + + idcg = 13.347184833073591 (sum of last column) */ dcg = new DiscountedCumulativeGain(true, null, 10); assertEquals(12.392789260714371 / 13.347184833073591, dcg.evaluate("id", hits, ratedDocs).metricScore(), DELTA); diff --git a/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java b/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java index 7c2fe8d99c330..85c652bab1cb0 100644 --- a/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecReindexIT.java @@ -18,11 +18,15 @@ import org.opensearch.index.engine.Segment; import org.opensearch.index.reindex.BulkByScrollResponse; import org.opensearch.index.reindex.ReindexAction; +import org.opensearch.index.reindex.ReindexPlugin; import org.opensearch.index.reindex.ReindexRequestBuilder; import org.opensearch.index.reindex.ReindexTestCase; +import org.opensearch.plugins.Plugin; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.List; import java.util.Map; import java.util.UUID; import java.util.concurrent.ExecutionException; @@ -40,6 +44,11 @@ public class MultiCodecReindexIT extends ReindexTestCase { + @Override + protected Collection> nodePlugins() { + return List.of(ReindexPlugin.class); + } + public void testReindexingMultipleCodecs() throws InterruptedException, ExecutionException { internalCluster().ensureAtLeastNumDataNodes(1); Map codecMap = Map.of( @@ -47,10 +56,6 @@ public void testReindexingMultipleCodecs() throws InterruptedException, Executio "BEST_COMPRESSION", "zlib", "BEST_COMPRESSION", - "zstd_no_dict", - "ZSTD_NO_DICT", - "zstd", - "ZSTD", "default", "BEST_SPEED", "lz4", @@ -125,7 +130,7 @@ private void assertReindexingWithMultipleCodecs(String destCodec, String 
destCod } private void useCodec(String index, String codec) throws ExecutionException, InterruptedException { - assertAcked(client().admin().indices().prepareClose(index)); + assertAcked(client().admin().indices().prepareClose(index).setWaitForActiveShards(1)); assertAcked( client().admin() @@ -134,7 +139,7 @@ private void useCodec(String index, String codec) throws ExecutionException, Int .get() ); - assertAcked(client().admin().indices().prepareOpen(index)); + assertAcked(client().admin().indices().prepareOpen(index).setWaitForActiveShards(1)); } private void flushAndRefreshIndex(String index) { diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java index e6c1cde5c0e98..c0bd3f4d9184e 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -206,7 +206,7 @@ public abstract class AbstractAsyncBulkByScrollAction< /** * Build the {@link BiFunction} to apply to all {@link RequestWrapper}. - * + *
<p>
      * Public for testings.... */ public BiFunction, ScrollableHitSource.Hit, RequestWrapper> buildScriptApplier() { diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkByScrollParallelizationHelper.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkByScrollParallelizationHelper.java index d5a6e392f2019..7534de1408bcc 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkByScrollParallelizationHelper.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/BulkByScrollParallelizationHelper.java @@ -63,14 +63,14 @@ private BulkByScrollParallelizationHelper() {} /** * Takes an action created by a {@link BulkByScrollTask} and runs it with regard to whether the request is sliced or not. - * + *
<p>
      * If the request is not sliced (i.e. the number of slices is 1), the worker action in the given {@link Runnable} will be started on * the local node. If the request is sliced (i.e. the number of slices is more than 1), then a subrequest will be created for each * slice and sent. - * + *
<p>
      * If slices are set as {@code "auto"}, this class will resolve that to a specific number based on characteristics of the source * indices. A request with {@code "auto"} slices may end up being sliced or unsliced. - * + *
<p>
      * This method is equivalent to calling {@link #initTaskState} followed by {@link #executeSlicedAction} */ static > void startSlicedAction( @@ -98,11 +98,11 @@ public void onFailure(Exception e) { /** * Takes an action and a {@link BulkByScrollTask} and runs it with regard to whether this task is a * leader or worker. - * + *
<p>
      * If this task is a worker, the worker action in the given {@link Runnable} will be started on the local * node. If the task is a leader (i.e. the number of slices is more than 1), then a subrequest will be * created for each slice and sent. - * + *
<p>
      * This method can only be called after the task state is initialized {@link #initTaskState}. */ static > void executeSlicedAction( @@ -125,7 +125,7 @@ static > void executeSliced /** * Takes a {@link BulkByScrollTask} and ensures that its initial task state (leader or worker) is set. - * + *
<p>
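Before the closing note on {@code "auto"} slices below, this is how the three helpers relate; an all-comment sketch, with argument lists deliberately elided rather than guessed:

    // BulkByScrollParallelizationHelper, per the javadocs above:
    //
    //   startSlicedAction(...)        // convenience entry point: performs both steps below
    //
    //   initTaskState(...);           // resolve "auto" to a slice count; mark the task leader or worker
    //   executeSlicedAction(...);     // leader: one subrequest per slice; worker: run locally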
      * If slices are set as {@code "auto"}, this method will resolve that to a specific number based on * characteristics of the source indices. A request with {@code "auto"} slices may end up being sliced or * unsliced. This method does not execute the action. In order to execute the action see diff --git a/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java b/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java index 9e9d94c8e8fc0..4c8d8aab4532b 100644 --- a/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java +++ b/modules/repository-url/src/main/java/org/opensearch/repositories/url/URLRepository.java @@ -113,7 +113,7 @@ public URLRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - super(metadata, false, namedXContentRegistry, clusterService, recoverySettings); + super(metadata, namedXContentRegistry, clusterService, recoverySettings); if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(environment.settings()) == false) { throw new RepositoryException(metadata.name(), "missing url"); diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..aaf2e35302d77 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +39b05d2d4027971bf99111a9be1d7035a116bb55 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.96.Final.jar.sha1 deleted file mode 100644 index 7abdb33dc79a2..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4b80fffbe77485b457bf844289bf1801f61b9e91 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..a77333ea8ae47 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.96.Final.jar.sha1 deleted file mode 100644 index 8fdb32be1de0b..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9cfe430f8b14e7ba86969d8e1126aa0aae4d18f0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..6f26bf4e6a9b5 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +992623e7d8f2d96e41faf1687bb963f5433e3517 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.96.Final.jar.sha1 deleted file mode 100644 index dfb0cf39463e2..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a4d0d95df5026965c454902ef3d6d84b81f89626 \ No newline at end of file diff --git 
a/modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..d2ff72db60d1f --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +847f942381145de23f21c836d05b0677474271d3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.96.Final.jar.sha1 deleted file mode 100644 index 85b5f52749671..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d10c167623cbc471753f950846df241d1021655c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..f12a6046e96d0 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +4c0acdb8bb73647ebb3847ac2d503d53d72c02b4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.96.Final.jar.sha1 deleted file mode 100644 index fe4f48c68e78b..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7840d7523d709e02961b647546f9d9dde1699306 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..8e4179ba15942 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +fe62f9ccd41b8660d07639dbbab8ae1edd6f2720 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.96.Final.jar.sha1 deleted file mode 100644 index 9e93f013226cd..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0e51db5568a881e0f9b013b35617c597dc32f130 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..ab2819da570fd --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +6620fbfb47667a5eb6050e35c7b4c88000bcd77f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.96.Final.jar.sha1 deleted file mode 100644 index 707285d3d29c3..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dbd15ca244be28e1a98ed29b9d755edbfa737e02 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..30d7758302e37 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +648ff5571022dbfa6789122e3872477bbf67fa7b \ No newline at end of 
file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 deleted file mode 100644 index e911c47d5ab1a..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -daf8578cade63a01525ee9d70371fa78e6e91094 \ No newline at end of file diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HeaderVerifierIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HeaderVerifierIT.java new file mode 100644 index 0000000000000..0d41d416499f6 --- /dev/null +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HeaderVerifierIT.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http.netty4; + +import org.opensearch.OpenSearchNetty4IntegTestCase; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.transport.Netty4BlockingPlugin; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import io.netty.buffer.ByteBufUtil; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.util.ReferenceCounted; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; +import static io.netty.handler.codec.http.HttpHeaderNames.HOST; + +@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) +public class Netty4HeaderVerifierIT extends OpenSearchNetty4IntegTestCase { + + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + + @Override + protected Collection> nodePlugins() { + return Collections.singletonList(Netty4BlockingPlugin.class); + } + + public void testThatNettyHttpServerRequestBlockedWithHeaderVerifier() throws Exception { + HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); + TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); + TransportAddress transportAddress = randomFrom(boundAddresses); + + final FullHttpRequest blockedRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + blockedRequest.headers().add("blockme", "Not Allowed"); + blockedRequest.headers().add(HOST, "localhost"); + + final List responses = new ArrayList<>(); + try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + try { + FullHttpResponse blockedResponse = nettyHttpClient.send(transportAddress.address(), blockedRequest); + responses.add(blockedResponse); + String blockedResponseContent = new String(ByteBufUtil.getBytes(blockedResponse.content()), StandardCharsets.UTF_8); + 
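    // What the assertions below verify end to end (see Netty4BlockingPlugin later
    // in this diff): the "blockme" header makes the example header verifier tag
    // the request with "blocked", and the wrapping dispatcher turns that tag into
    // a 401 response whose body is "Hit header_verifier".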
assertThat(blockedResponseContent, containsString("Hit header_verifier")); + assertThat(blockedResponse.status().code(), equalTo(401)); + } finally { + responses.forEach(ReferenceCounted::release); + } + } + } + +} diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java index dcaa9553f5b1f..de5cbfcc7bc2b 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java @@ -57,7 +57,7 @@ /** * This test checks that in-flight requests are limited on HTTP level and that requests that are excluded from limiting can pass. - * + *
<p>
      * As the same setting is also used to limit in-flight requests on transport level, we avoid transport messages by forcing * a single node "cluster". */ diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/Netty4BlockingPlugin.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/Netty4BlockingPlugin.java new file mode 100644 index 0000000000000..85729e2120607 --- /dev/null +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/Netty4BlockingPlugin.java @@ -0,0 +1,148 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.netty4.Netty4HttpServerTransport; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.threadpool.ThreadPool; + +import java.util.Collections; +import java.util.Map; +import java.util.function.Supplier; + +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.handler.codec.http.DefaultHttpRequest; +import io.netty.handler.codec.http.HttpMessage; +import io.netty.util.AttributeKey; +import io.netty.util.ReferenceCountUtil; + +public class Netty4BlockingPlugin extends Netty4Plugin { + + private static final AttributeKey SHOULD_BLOCK = AttributeKey.newInstance("should-block"); + + public class Netty4BlockingHttpServerTransport extends Netty4HttpServerTransport { + + public Netty4BlockingHttpServerTransport( + Settings settings, + NetworkService networkService, + BigArrays bigArrays, + ThreadPool threadPool, + NamedXContentRegistry xContentRegistry, + Dispatcher dispatcher, + ClusterSettings clusterSettings, + SharedGroupFactory sharedGroupFactory, + Tracer tracer + ) { + super( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry, + dispatcher, + clusterSettings, + sharedGroupFactory, + tracer + ); + } + + @Override + protected ChannelInboundHandlerAdapter createHeaderVerifier() { + return new ExampleBlockingNetty4HeaderVerifier(); + } + } + + @Override + public Map> getHttpTransports( + Settings settings, + ThreadPool threadPool, + BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedXContentRegistry xContentRegistry, + NetworkService networkService, + HttpServerTransport.Dispatcher dispatcher, + ClusterSettings clusterSettings, + Tracer tracer + ) { + return Collections.singletonMap( + NETTY_HTTP_TRANSPORT_NAME, + () -> new Netty4BlockingHttpServerTransport( + settings, + networkService, + bigArrays, + threadPool, + xContentRegistry, + new BlockingDispatcher(dispatcher), + 
clusterSettings, + getSharedGroupFactory(settings), + tracer + ) + ); + } + + /** POC for how an external header verifier would be implemented */ + public class ExampleBlockingNetty4HeaderVerifier extends SimpleChannelInboundHandler { + + @Override + public void channelRead0(ChannelHandlerContext ctx, DefaultHttpRequest msg) throws Exception { + ReferenceCountUtil.retain(msg); + if (isBlocked(msg)) { + msg.headers().add("blocked", true); + } + ctx.fireChannelRead(msg); + } + + private boolean isBlocked(HttpMessage request) { + final boolean shouldBlock = request.headers().contains("blockme"); + + return shouldBlock; + } + } + + class BlockingDispatcher implements HttpServerTransport.Dispatcher { + + private HttpServerTransport.Dispatcher originalDispatcher; + + public BlockingDispatcher(final HttpServerTransport.Dispatcher originalDispatcher) { + super(); + this.originalDispatcher = originalDispatcher; + } + + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + if (request.getHeaders().containsKey("blocked")) { + channel.sendResponse(new BytesRestResponse(RestStatus.UNAUTHORIZED, "Hit header_verifier")); + return; + } + originalDispatcher.dispatchRequest(request, channel, threadContext); + + } + + @Override + public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, Throwable cause) { + originalDispatcher.dispatchBadRequest(channel, threadContext, cause); + } + } +} diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java index 5bfb7976c6e18..e93a3bf2898e2 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java @@ -39,6 +39,7 @@ import org.opensearch.transport.netty4.Netty4TcpChannel; import java.net.InetSocketAddress; +import java.util.Optional; import io.netty.channel.Channel; @@ -86,6 +87,18 @@ public Channel getNettyChannel() { return channel; } + @SuppressWarnings("unchecked") + @Override + public Optional get(String name, Class clazz) { + Object handler = getNettyChannel().pipeline().get(name); + + if (handler != null && clazz.isInstance(handler) == true) { + return Optional.of((T) handler); + } + + return Optional.empty(); + } + @Override public String toString() { return "Netty4HttpChannel{" + "localAddress=" + getLocalAddress() + ", remoteAddress=" + getRemoteAddress() + '}'; diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequest.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequest.java index 7d937157c1034..3c96affb7adf7 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequest.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpRequest.java @@ -258,7 +258,7 @@ public FullHttpRequest nettyRequest() { /** * A wrapper of {@link HttpHeaders} that implements a map to prevent copying unnecessarily. This class does not support modifications * and due to the underlying implementation, it performs case insensitive lookups of key to values. - * + *
<p>
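As the caveat below spells out, keyed lookups on this map are cheap while the bulk views copy; a hedged usage sketch, where the request variable is a hypothetical org.opensearch.rest.RestRequest:

    // Case-insensitive keyed lookup: no copy of the underlying HttpHeaders.
    List<String> accept = request.getHeaders().get("accept");
    // entrySet() and values(), by contrast, copy the header values on every call.
    int headerCount = request.getHeaders().entrySet().size();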
      * It is important to note that this implementation does have some downsides in that each invocation of the * {@link #values()} and {@link #entrySet()} methods will perform a copy of the values in the HttpHeaders rather than returning a * view of the underlying values. diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java index 402c8d8798447..0838cfe8c3273 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpServerTransport.java @@ -52,6 +52,7 @@ import org.opensearch.http.HttpHandlingSettings; import org.opensearch.http.HttpReadTimeoutException; import org.opensearch.http.HttpServerChannel; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.NettyAllocator; import org.opensearch.transport.NettyByteBufSizer; @@ -174,9 +175,10 @@ public Netty4HttpServerTransport( NamedXContentRegistry xContentRegistry, Dispatcher dispatcher, ClusterSettings clusterSettings, - SharedGroupFactory sharedGroupFactory + SharedGroupFactory sharedGroupFactory, + Tracer tracer ) { - super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings); + super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings, tracer); Netty4Utils.setAvailableProcessors(OpenSearchExecutors.NODE_PROCESSORS_SETTING.get(settings)); NettyAllocator.logAllocatorDescriptionIfNeeded(); this.sharedGroupFactory = sharedGroupFactory; @@ -315,8 +317,10 @@ public ChannelHandler configureServerChannelHandler() { return new HttpChannelHandler(this, handlingSettings); } - static final AttributeKey HTTP_CHANNEL_KEY = AttributeKey.newInstance("opensearch-http-channel"); - static final AttributeKey HTTP_SERVER_CHANNEL_KEY = AttributeKey.newInstance("opensearch-http-server-channel"); + public static final AttributeKey HTTP_CHANNEL_KEY = AttributeKey.newInstance("opensearch-http-channel"); + protected static final AttributeKey HTTP_SERVER_CHANNEL_KEY = AttributeKey.newInstance( + "opensearch-http-server-channel" + ); protected static class HttpChannelHandler extends ChannelInitializer { @@ -349,7 +353,8 @@ protected void initChannel(Channel ch) throws Exception { ); decoder.setCumulator(ByteToMessageDecoder.COMPOSITE_CUMULATOR); ch.pipeline().addLast("decoder", decoder); - ch.pipeline().addLast("decoder_compress", new HttpContentDecompressor()); + ch.pipeline().addLast("header_verifier", transport.createHeaderVerifier()); + ch.pipeline().addLast("decoder_compress", transport.createDecompressor()); ch.pipeline().addLast("encoder", new HttpResponseEncoder()); final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); @@ -391,4 +396,21 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { } } } + + /** + * Extension point that allows a NetworkPlugin to extend the netty pipeline and inspect headers after request decoding + */ + protected ChannelInboundHandlerAdapter createHeaderVerifier() { + // pass-through + return new ChannelInboundHandlerAdapter(); + } + + /** + * Extension point that allows a NetworkPlugin to override the default netty 
HttpContentDecompressor and supply a custom decompressor. + * + * Used in instances to conditionally decompress depending on the outcome from header verification + */ + protected ChannelInboundHandlerAdapter createDecompressor() { + return new HttpContentDecompressor(); + } } diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesSocketChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesSocketChannel.java index b73f74d187a2a..4bab91565d3ad 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesSocketChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/CopyBytesSocketChannel.java @@ -65,7 +65,7 @@ * This class is adapted from {@link NioSocketChannel} class in the Netty project. It overrides the channel * read/write behavior to ensure that the bytes are always copied to a thread-local direct bytes buffer. This * happens BEFORE the call to the Java {@link SocketChannel} is issued. - * + *
<p>
      * The purpose of this class is to allow the disabling of netty direct buffer pooling while allowing us to * control how bytes end up being copied to direct memory. If we simply disabled netty pooling, we would rely * on the JDK's internal thread local buffer pooling. Instead, this class allows us to create a one thread diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4Plugin.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4Plugin.java index 9ccfb84d7cc68..4258fa8e04d61 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4Plugin.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4Plugin.java @@ -48,6 +48,7 @@ import org.opensearch.http.netty4.Netty4HttpServerTransport; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.netty4.Netty4Transport; @@ -95,7 +96,8 @@ public Map> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap( NETTY_TRANSPORT_NAME, @@ -107,7 +109,8 @@ public Map> getTransports( pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, - getSharedGroupFactory(settings) + getSharedGroupFactory(settings), + tracer ) ); } @@ -122,7 +125,8 @@ public Map> getHttpTransports( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { return Collections.singletonMap( NETTY_HTTP_TRANSPORT_NAME, @@ -134,12 +138,13 @@ public Map> getHttpTransports( xContentRegistry, dispatcher, clusterSettings, - getSharedGroupFactory(settings) + getSharedGroupFactory(settings), + tracer ) ); } - private SharedGroupFactory getSharedGroupFactory(Settings settings) { + SharedGroupFactory getSharedGroupFactory(Settings settings) { SharedGroupFactory groupFactory = this.groupFactory.get(); if (groupFactory != null) { assert groupFactory.getSettings().equals(settings) : "Different settings than originally provided"; diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java index 5db1f7c333157..79a5bf9e95121 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4TcpChannel.java @@ -41,6 +41,7 @@ import org.opensearch.transport.TransportException; import java.net.InetSocketAddress; +import java.util.Optional; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; @@ -164,6 +165,18 @@ public void sendMessage(BytesReference reference, ActionListener listener) } } + @SuppressWarnings("unchecked") + @Override + public Optional get(String name, Class clazz) { + final Object handler = getNettyChannel().pipeline().get(name); + + if (handler != null && clazz.isInstance(handler) == true) { + return Optional.of((T) handler); + } + + return Optional.empty(); + } + public Channel getNettyChannel() { return channel; } diff --git 
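A hypothetical caller of the typed pipeline accessor added above to Netty4HttpChannel and Netty4TcpChannel; "decoder_compress" is the handler name wired into the pipeline earlier in this diff:

    static void logDecompressor(Netty4HttpChannel httpChannel) {
        // An empty Optional means no handler with that name, or a type mismatch.
        Optional<HttpContentDecompressor> decompressor =
            httpChannel.get("decoder_compress", HttpContentDecompressor.class);
        decompressor.ifPresent(h -> System.out.println("decompressor installed: " + h));
    }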
a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4Transport.java index f77c29c8bfa60..e76a227630dc1 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/Netty4Transport.java @@ -50,6 +50,7 @@ import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Netty4NioSocketChannel; import org.opensearch.transport.NettyAllocator; @@ -131,9 +132,10 @@ public Netty4Transport( PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService, - SharedGroupFactory sharedGroupFactory + SharedGroupFactory sharedGroupFactory, + Tracer tracer ) { - super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); + super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService, tracer); Netty4Utils.setAvailableProcessors(OpenSearchExecutors.NODE_PROCESSORS_SETTING.get(settings)); NettyAllocator.logAllocatorDescriptionIfNeeded(); this.sharedGroupFactory = sharedGroupFactory; diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java index 7c4447413bb31..492da422382ce 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java @@ -47,6 +47,7 @@ import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -112,7 +113,8 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, xContentRegistry(), dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - new SharedGroupFactory(Settings.EMPTY) + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE ) ) { httpServerTransport.start(); diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java index a2e2b8d2dfba2..9bca90c3a408a 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -45,6 +45,7 @@ import org.opensearch.http.HttpResponse; import org.opensearch.http.HttpServerTransport; import org.opensearch.http.NullDispatcher; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -135,7 +136,8 @@ class 
CustomNettyHttpServerTransport extends Netty4HttpServerTransport { xContentRegistry(), new NullDispatcher(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ); } diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java index 309207499ae62..2bdb95389e737 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java @@ -55,6 +55,7 @@ import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.threadpool.TestThreadPool; @@ -198,7 +199,8 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, xContentRegistry(), dispatcher, clusterSettings, - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -247,7 +249,8 @@ public void testBindUnavailableAddress() { xContentRegistry(), new NullDispatcher(), clusterSettings, - new SharedGroupFactory(Settings.EMPTY) + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -265,7 +268,8 @@ public void testBindUnavailableAddress() { xContentRegistry(), new NullDispatcher(), clusterSettings, - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) ) { BindHttpException bindHttpException = expectThrows(BindHttpException.class, otherTransport::start); @@ -317,7 +321,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, clusterSettings, - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -379,7 +384,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, clusterSettings, - new SharedGroupFactory(Settings.EMPTY) + new SharedGroupFactory(Settings.EMPTY), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -448,7 +454,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -521,7 +528,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) ) { transport.start(); diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java index 3e5f71f1464a1..c92ccba82835f 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java 
+++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java @@ -40,6 +40,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.SharedGroupFactory; @@ -86,7 +87,8 @@ public void startThreadPool() { recycler, new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService(), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ); nettyTransport.start(); diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/NettyTransportMultiPortTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/NettyTransportMultiPortTests.java index 98a001b8ae4bb..7cca00db68559 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/NettyTransportMultiPortTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/NettyTransportMultiPortTests.java @@ -40,6 +40,7 @@ import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -141,7 +142,8 @@ private TcpTransport startTransport(Settings settings, ThreadPool threadPool) { recycler, new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService(), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ); transport.start(); diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java index 35b19002dce8d..710b3ff6bd0ca 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java @@ -44,6 +44,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransportService; import org.opensearch.test.transport.StubbableTransport; import org.opensearch.transport.AbstractSimpleTransportTestCase; @@ -82,7 +83,8 @@ protected Transport build(Settings settings, final Version version, ClusterSetti PageCacheRecycler.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService(), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + NoopTracer.INSTANCE ) { @Override diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.7.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.7.0.jar.sha1 deleted file mode 100644 index 0ed030926ab93..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 
@@ -94293b169fb8572f440a5a4a523320ecf9778ffe \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..ef410899981ca --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 @@ -0,0 +1 @@ +7133d34e92770f59eb28686f4d511b9f3f32e970 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.7.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.7.0.jar.sha1 deleted file mode 100644 index ddd67276606a5..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2df800a38b64867b8dcd61fc2cd986114e4a80cb \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..46b83c9e40b3a --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 @@ -0,0 +1 @@ +be44282e1f6b91a0650fcceb558053d6bdd4863d \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.7.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.7.0.jar.sha1 deleted file mode 100644 index 0cd68af98e724..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a01e8153f34d72e8c8c0180c1dea5b10f677dd3a \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..36664695a7818 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 @@ -0,0 +1 @@ +bd1f80d33346f7e588685484ef29a304db5190e4 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.7.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.7.0.jar.sha1 deleted file mode 100644 index c7b4d2dc6da75..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b7d47d54683b0b1e09b271c32d1b7d3eb1990f49 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..003ccdf8b0727 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 @@ -0,0 +1 @@ +b9ffdc7a52d2087ecb03318ec06305b480cdfe82 \ No newline at end of file diff --git a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/HaasePhonetik.java b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/HaasePhonetik.java index c80d32228feeb..b875fab7d4006 100644 --- a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/HaasePhonetik.java +++ b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/HaasePhonetik.java @@ -35,13 +35,13 @@ /** * Geänderter Algorithmus aus der Matching Toolbox von Rainer Schnell * Java-Programmierung von Jörg Reiher - * + *
<p>
      * Die Kölner Phonetik wurde für den Einsatz in Namensdatenbanken wie * der Verwaltung eines Krankenhauses durch Martin Haase (Institut für * Sprachwissenschaft, Universität zu Köln) und Kai Heitmann (Insitut für * medizinische Statistik, Informatik und Epidemiologie, Köln) überarbeitet. * M. Haase und K. Heitmann. Die Erweiterte Kölner Phonetik. 526, 2000. - * + *

      * nach: Martin Wilz, Aspekte der Kodierung phonetischer Ähnlichkeiten * in deutschen Eigennamen, Magisterarbeit. * http://www.uni-koeln.de/phil-fak/phonetik/Lehre/MA-Arbeiten/magister_wilz.pdf diff --git a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/KoelnerPhonetik.java b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/KoelnerPhonetik.java index 33e386af9f364..4bcc10ff73b0a 100644 --- a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/KoelnerPhonetik.java +++ b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/KoelnerPhonetik.java @@ -46,13 +46,13 @@ /** * Kölner Phonetik - * + *

      * H.J. Postel, Die Kölner Phonetik. Ein Verfahren zu Identifizierung * von Personennamen auf der Grundlage der Gestaltanalyse. IBM-Nachrichten 19 (1969), 925-931 - * + *

      * Algorithmus aus der Matching Toolbox von Rainer Schnell * Java-Programmierung von Jörg Reiher - * + *

      * mit Änderungen von Jörg Prante * */ diff --git a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/Nysiis.java b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/Nysiis.java index 818dbba85e2de..c3237114c65d5 100644 --- a/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/Nysiis.java +++ b/plugins/analysis-phonetic/src/main/java/org/opensearch/index/analysis/phonetic/Nysiis.java @@ -40,7 +40,7 @@ /** * * Taken from commons-codec trunk (unreleased yet) - * + *

      * Encodes a string into a NYSIIS value. NYSIIS is an encoding used to relate * similar names, but can also be used as a general purpose scheme to find word * with similar phonemes. diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.7.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.7.0.jar.sha1 deleted file mode 100644 index 8df7245044171..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e68b9816e6cff8ee15f5b350cf2ffa54f9828b7 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..e22eaa474016f --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 @@ -0,0 +1 @@ +f73e2007b133fb699e517ef13b4952844f0150d8 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.7.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.7.0.jar.sha1 deleted file mode 100644 index 974e4202f5ffb..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d23b1f05b471e05d0d6068b3ece7c8c65672eae7 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..1ebe42a2a2f56 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 @@ -0,0 +1 @@ +2c09cbc021a8f81a01600a1d2a999361e70f7aed \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.7.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.7.0.jar.sha1 deleted file mode 100644 index dce408a7d40ef..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfb4313f3c68d337310522840d7144c1605d084a \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..3c4523d45c0f5 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 @@ -0,0 +1 @@ +b054f2c7b11fc7c5601b4c3cdf18aa7508612898 \ No newline at end of file diff --git a/plugins/crypto-kms/build.gradle b/plugins/crypto-kms/build.gradle new file mode 100644 index 0000000000000..c4a8609b6df48 --- /dev/null +++ b/plugins/crypto-kms/build.gradle @@ -0,0 +1,74 @@ +import org.opensearch.gradle.info.BuildParams + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
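Before the new crypto-kms build file continues below, a quick look at the javadoc cleanups above: KoelnerPhonetik, HaasePhonetik, and Nysiis are commons-codec-style string encoders, which is why their comments read like algorithm citations. A minimal usage sketch, assuming the commons-codec `StringEncoder` contract these classes follow and no-arg constructors (the sample inputs and `main` wrapper are illustrative, not from this diff):

```java
import org.apache.commons.codec.EncoderException;
import org.opensearch.index.analysis.phonetic.HaasePhonetik;
import org.opensearch.index.analysis.phonetic.KoelnerPhonetik;
import org.opensearch.index.analysis.phonetic.Nysiis;

public class PhoneticSketch {
    public static void main(String[] args) throws EncoderException {
        // Kölner Phonetik: digit codes tuned to German name matching (Postel 1969)
        System.out.println(new KoelnerPhonetik().encode("Meyer"));
        // Haase/Heitmann variant: the "Erweiterte Kölner Phonetik" cited above
        System.out.println(new HaasePhonetik().encode("Meyer"));
        // NYSIIS: letter codes relating similar (mostly English) names
        System.out.println(new Nysiis().encode("Knight"));
    }
}
```

The useful property, per the cited papers, is that spelling variants such as "Meyer" and "Maier" collapse to the same code, so name lookups tolerate orthographic differences.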
+ */ + +apply plugin: 'opensearch.build' +apply plugin: 'opensearch.publish' +apply plugin: 'opensearch.yaml-rest-test' + +opensearchplugin { + description 'AWS KMS plugin to provide crypto keys' + classname 'org.opensearch.crypto.kms.CryptoKmsPlugin' +} + +ext { + // Do not fail on `javadoc` warning + failOnJavadocWarning = false +} + +dependencies { + api "software.amazon.awssdk:sdk-core:${versions.aws}" + api "software.amazon.awssdk:aws-core:${versions.aws}" + api "software.amazon.awssdk:utils:${versions.aws}" + api "software.amazon.awssdk:auth:${versions.aws}" + api "software.amazon.awssdk:kms:${versions.aws}" + api "software.amazon.awssdk:http-client-spi:${versions.aws}" + api "software.amazon.awssdk:apache-client:${versions.aws}" + api "software.amazon.awssdk:regions:${versions.aws}" + api "software.amazon.awssdk:profiles:${versions.aws}" + api "software.amazon.awssdk:endpoints-spi:${versions.aws}" + api "software.amazon.awssdk:annotations:${versions.aws}" + api "software.amazon.awssdk:metrics-spi:${versions.aws}" + api "software.amazon.awssdk:json-utils:${versions.aws}" + api "software.amazon.awssdk:protocol-core:${versions.aws}" + api "software.amazon.awssdk:aws-query-protocol:${versions.aws}" + api "software.amazon.awssdk:aws-json-protocol:${versions.aws}" + api "software.amazon.awssdk:third-party-jackson-core:${versions.aws}" + api "org.apache.httpcomponents:httpclient:${versions.httpclient}" + api "org.apache.httpcomponents:httpcore:${versions.httpcore}" + api "commons-logging:commons-logging:${versions.commonslogging}" + api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" + api "org.slf4j:slf4j-api:${versions.slf4j}" + api "commons-codec:commons-codec:${versions.commonscodec}" + api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" + api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + api "org.reactivestreams:reactive-streams:${versions.reactivestreams}" +} + +//testClusters.all { +// module ':modules:crypto' +//} + +tasks.named("dependencyLicenses").configure { + mapping from: /jackson-.*/, to: 'jackson' + mapping from: /jaxb-.*/, to: 'jaxb' + mapping from: /netty-.*/, to: 'netty' +} + +bundlePlugin { + from('config/crypto-kms') { + into 'config' + } +} + +thirdPartyAudit.enabled = false +testingConventions.enabled = false
diff --git a/plugins/crypto-kms/config/crypto-kms/log4j2.properties b/plugins/crypto-kms/config/crypto-kms/log4j2.properties new file mode 100644 index 0000000000000..285ac4a1d1376 --- /dev/null +++ b/plugins/crypto-kms/config/crypto-kms/log4j2.properties @@ -0,0 +1,22 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# +# Modifications Copyright OpenSearch Contributors. See +# GitHub history for details.
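The build script above wires the AWS SDK v2 modules, notably `software.amazon.awssdk:kms`, into the new crypto-kms plugin, whose description says it provides crypto keys from AWS KMS. A minimal sketch of the data-key flow using only the SDK artifacts declared above; the region, key alias, and `main` wrapper are assumptions, not taken from this diff:

```java
import software.amazon.awssdk.core.SdkBytes;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.kms.KmsClient;
import software.amazon.awssdk.services.kms.model.DataKeySpec;
import software.amazon.awssdk.services.kms.model.GenerateDataKeyRequest;
import software.amazon.awssdk.services.kms.model.GenerateDataKeyResponse;

public class KmsDataKeySketch {
    public static void main(String[] args) {
        // Region and key alias are placeholders; a real plugin would read
        // them from settings rather than hard-coding them.
        try (KmsClient kms = KmsClient.builder().region(Region.US_EAST_1).build()) {
            GenerateDataKeyResponse resp = kms.generateDataKey(GenerateDataKeyRequest.builder()
                .keyId("alias/opensearch-crypto") // hypothetical key alias
                .keySpec(DataKeySpec.AES_256)     // ask KMS for a 256-bit data key
                .build());
            SdkBytes plaintextKey = resp.plaintext();    // use locally, never persist
            SdkBytes wrappedKey = resp.ciphertextBlob(); // safe to store with the data
            System.out.println("plaintext key bytes: " + plaintextKey.asByteArray().length);
            System.out.println("wrapped key bytes: " + wrappedKey.asByteArray().length);
        }
    }
}
```

This also explains the long dependency list (`auth`, `regions`, `profiles`, the `aws-query-protocol`/`aws-json-protocol` marshallers): they are the runtime pieces the KMS client needs. The log4j2.properties that follows then quiets the SDK's own loggers down to warn/error.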
+# + +logger.com_amazonaws.name = software.amazon.awssdk +logger.com_amazonaws.level = warn + +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.name = software.amazon.awssdk.jmx.SdkMBeanRegistrySupport +logger.com_amazonaws_jmx_SdkMBeanRegistrySupport.level = error + +logger.com_amazonaws_metrics_AwsSdkMetrics.name = software.amazon.awssdk.metrics.AwsSdkMetrics +logger.com_amazonaws_metrics_AwsSdkMetrics.level = error + +logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.name = software.amazon.awssdk.auth.profile.internal.BasicProfileConfigFileLoader +logger.com_amazonaws_auth_profile_internal_BasicProfileConfigFileLoader.level = error diff --git a/plugins/crypto-kms/licenses/annotations-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/annotations-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..5a626eeb5725b --- /dev/null +++ b/plugins/crypto-kms/licenses/annotations-2.20.55.jar.sha1 @@ -0,0 +1 @@ +330e9d0e5f2401fffba5afe30f3740f400e8308d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-logs-LICENSE.txt b/plugins/crypto-kms/licenses/annotations-LICENSE.txt similarity index 100% rename from plugins/telemetry-otel/licenses/opentelemetry-api-logs-LICENSE.txt rename to plugins/crypto-kms/licenses/annotations-LICENSE.txt diff --git a/plugins/crypto-kms/licenses/annotations-NOTICE.txt b/plugins/crypto-kms/licenses/annotations-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/annotations-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/apache-client-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/apache-client-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..3ee96bb6e4076 --- /dev/null +++ b/plugins/crypto-kms/licenses/apache-client-2.20.55.jar.sha1 @@ -0,0 +1 @@ +5c149885667d41a306769505cfa481cfddf6f113 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/apache-client-LICENSE.txt b/plugins/crypto-kms/licenses/apache-client-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/apache-client-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
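A note on the many one-line `licenses/*.jar.sha1` files in this diff: each holds the bare SHA-1 of the matching jar, which the `dependencyLicenses` check (configured in the build script above) compares against the resolved artifact, so every version bump deletes one hash file and adds another. A sketch of reproducing such a hash with only the JDK, assuming a locally available jar (the path is a placeholder):

```java
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;

public class Sha1Sketch {
    public static void main(String[] args) throws Exception {
        // Placeholder: point this at the resolved jar, e.g. from the Gradle cache.
        Path jar = Path.of("lucene-analysis-icu-9.8.0.jar");
        MessageDigest md = MessageDigest.getInstance("SHA-1");
        try (InputStream in = Files.newInputStream(jar)) {
            byte[] buf = new byte[8192];
            for (int n; (n = in.read(buf)) != -1;) {
                md.update(buf, 0, n); // stream the whole jar through the digest
            }
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : md.digest()) {
            hex.append(String.format("%02x", b)); // lowercase hex, as in the .sha1 files
        }
        System.out.println(hex); // should match e.g. 7133d34e... for analysis-icu above
    }
}
```

In practice these files are regenerated by the build's checksum tooling (an `updateShas`-style task, if this repo follows the usual OpenSearch convention) rather than by hand; the manual digest is just a way to verify one.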
diff --git a/plugins/crypto-kms/licenses/apache-client-NOTICE.txt b/plugins/crypto-kms/licenses/apache-client-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/apache-client-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/auth-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/auth-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..010464bdf9fd1 --- /dev/null +++ b/plugins/crypto-kms/licenses/auth-2.20.55.jar.sha1 @@ -0,0 +1 @@ +e21f00a8a2096d5044f3eff176944256e01a175e \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/auth-LICENSE.txt b/plugins/crypto-kms/licenses/auth-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/auth-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/auth-NOTICE.txt b/plugins/crypto-kms/licenses/auth-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/auth-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/crypto-kms/licenses/aws-core-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/aws-core-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..4b4ee1db864a8 --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-core-2.20.55.jar.sha1 @@ -0,0 +1 @@ +734427c2cece98a8cb90871b78d2311e4a7ef746 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-core-LICENSE.txt b/plugins/crypto-kms/licenses/aws-core-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-core-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/aws-core-NOTICE.txt b/plugins/crypto-kms/licenses/aws-core-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-core-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/aws-json-protocol-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/aws-json-protocol-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..45a88305c1928 --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-json-protocol-2.20.55.jar.sha1 @@ -0,0 +1 @@ +a52731c86b974aefa5bbb1c545f407811a0163b1 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-json-protocol-LICENSE.txt b/plugins/crypto-kms/licenses/aws-json-protocol-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-json-protocol-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/crypto-kms/licenses/aws-json-protocol-NOTICE.txt b/plugins/crypto-kms/licenses/aws-json-protocol-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-json-protocol-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/aws-query-protocol-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/aws-query-protocol-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..ba5f43378730c --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-query-protocol-2.20.55.jar.sha1 @@ -0,0 +1 @@ +ac116215cc85366f0bdffee53b4c21e7a7fe03ef \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/aws-query-protocol-LICENSE.txt b/plugins/crypto-kms/licenses/aws-query-protocol-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-query-protocol-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/aws-query-protocol-NOTICE.txt b/plugins/crypto-kms/licenses/aws-query-protocol-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/aws-query-protocol-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/crypto-kms/licenses/commons-codec-1.15.jar.sha1 b/plugins/crypto-kms/licenses/commons-codec-1.15.jar.sha1 new file mode 100644 index 0000000000000..62d99837b87e1 --- /dev/null +++ b/plugins/crypto-kms/licenses/commons-codec-1.15.jar.sha1 @@ -0,0 +1 @@ +49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/commons-codec-LICENSE.txt b/plugins/crypto-kms/licenses/commons-codec-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/commons-codec-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/commons-codec-NOTICE.txt b/plugins/crypto-kms/licenses/commons-codec-NOTICE.txt new file mode 100644 index 0000000000000..56916449bbe10 --- /dev/null +++ b/plugins/crypto-kms/licenses/commons-codec-NOTICE.txt @@ -0,0 +1,17 @@ +Apache Commons Codec +Copyright 2002-2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java +contains test data from http://aspell.net/test/orig/batch0.tab. +Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org) + +=============================================================================== + +The content of package org.apache.commons.codec.language.bm has been translated +from the original php source code available at http://stevemorse.org/phoneticinfo.htm +with permission from the original authors. +Original source copyright: +Copyright (c) 2008 Alexander Beider & Stephen P. Morse. diff --git a/plugins/crypto-kms/licenses/commons-logging-1.2.jar.sha1 b/plugins/crypto-kms/licenses/commons-logging-1.2.jar.sha1 new file mode 100644 index 0000000000000..f40f0242448e8 --- /dev/null +++ b/plugins/crypto-kms/licenses/commons-logging-1.2.jar.sha1 @@ -0,0 +1 @@ +4bfc12adfe4842bf07b657f0369c4cb522955686 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/commons-logging-LICENSE.txt b/plugins/crypto-kms/licenses/commons-logging-LICENSE.txt new file mode 100644 index 0000000000000..57bc88a15a0ee --- /dev/null +++ b/plugins/crypto-kms/licenses/commons-logging-LICENSE.txt @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/plugins/crypto-kms/licenses/commons-logging-NOTICE.txt b/plugins/crypto-kms/licenses/commons-logging-NOTICE.txt new file mode 100644 index 0000000000000..72eb32a902458 --- /dev/null +++ b/plugins/crypto-kms/licenses/commons-logging-NOTICE.txt @@ -0,0 +1,5 @@ +Apache Commons Logging +Copyright 2003-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/plugins/crypto-kms/licenses/endpoints-spi-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/endpoints-spi-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..5bc0e31166c77 --- /dev/null +++ b/plugins/crypto-kms/licenses/endpoints-spi-2.20.55.jar.sha1 @@ -0,0 +1 @@ +085f82038ee86a7d6cd568fe7edd842978d92de3 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/endpoints-spi-LICENSE.txt b/plugins/crypto-kms/licenses/endpoints-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/endpoints-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/endpoints-spi-NOTICE.txt b/plugins/crypto-kms/licenses/endpoints-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/endpoints-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/crypto-kms/licenses/http-client-spi-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/http-client-spi-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..523cf43dcb2e9 --- /dev/null +++ b/plugins/crypto-kms/licenses/http-client-spi-2.20.55.jar.sha1 @@ -0,0 +1 @@ +34f9b10c1a46038a0ceebdd750ba3a413a862ceb \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/http-client-spi-LICENSE.txt b/plugins/crypto-kms/licenses/http-client-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/http-client-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/http-client-spi-NOTICE.txt b/plugins/crypto-kms/licenses/http-client-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/http-client-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/httpclient-4.5.14.jar.sha1 b/plugins/crypto-kms/licenses/httpclient-4.5.14.jar.sha1 new file mode 100644 index 0000000000000..66e05851c2e3c --- /dev/null +++ b/plugins/crypto-kms/licenses/httpclient-4.5.14.jar.sha1 @@ -0,0 +1 @@ +1194890e6f56ec29177673f2f12d0b8e627dec98 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/httpclient-LICENSE.txt b/plugins/crypto-kms/licenses/httpclient-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/plugins/crypto-kms/licenses/httpclient-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from +<https://publicsuffix.org/list/effective_tld_names.dat> +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8.
"License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. 
+ +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+ +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/plugins/crypto-kms/licenses/httpclient-NOTICE.txt b/plugins/crypto-kms/licenses/httpclient-NOTICE.txt new file mode 100644 index 0000000000000..4f6058178b201 --- /dev/null +++ b/plugins/crypto-kms/licenses/httpclient-NOTICE.txt @@ -0,0 +1,5 @@ +Apache HttpComponents Client +Copyright 1999-2015 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/plugins/crypto-kms/licenses/httpcore-4.4.16.jar.sha1 b/plugins/crypto-kms/licenses/httpcore-4.4.16.jar.sha1 new file mode 100644 index 0000000000000..172110694b5bd --- /dev/null +++ b/plugins/crypto-kms/licenses/httpcore-4.4.16.jar.sha1 @@ -0,0 +1 @@ +51cf043c87253c9f58b539c9f7e44c8894223850 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/httpcore-LICENSE.txt b/plugins/crypto-kms/licenses/httpcore-LICENSE.txt new file mode 100644 index 0000000000000..72819a9f06f2a --- /dev/null +++ b/plugins/crypto-kms/licenses/httpcore-LICENSE.txt @@ -0,0 +1,241 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project contains annotations in the package org.apache.http.annotation +which are derived from JCIP-ANNOTATIONS +Copyright (c) 2005 Brian Goetz and Tim Peierls. +See http://www.jcip.net and the Creative Commons Attribution License +(http://creativecommons.org/licenses/by/2.5) +Full text: http://creativecommons.org/licenses/by/2.5/legalcode + +License + +THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. + +BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS. + +1. Definitions + + "Collective Work" means a work, such as a periodical issue, anthology or encyclopedia, in which the Work in its entirety in unmodified form, along with a number of other contributions, constituting separate and independent works in themselves, are assembled into a collective whole. A work that constitutes a Collective Work will not be considered a Derivative Work (as defined below) for the purposes of this License. + "Derivative Work" means a work based upon the Work or upon the Work and other pre-existing works, such as a translation, musical arrangement, dramatization, fictionalization, motion picture version, sound recording, art reproduction, abridgment, condensation, or any other form in which the Work may be recast, transformed, or adapted, except that a work that constitutes a Collective Work will not be considered a Derivative Work for the purpose of this License. For the avoidance of doubt, where the Work is a musical composition or sound recording, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered a Derivative Work for the purpose of this License. 
+ "Licensor" means the individual or entity that offers the Work under the terms of this License. + "Original Author" means the individual or entity who created the Work. + "Work" means the copyrightable work of authorship offered under the terms of this License. + "You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation. + +2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or restrict any rights arising from fair use, first sale or other limitations on the exclusive rights of the copyright owner under copyright law or other applicable laws. + +3. License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below: + + to reproduce the Work, to incorporate the Work into one or more Collective Works, and to reproduce the Work as incorporated in the Collective Works; + to create and reproduce Derivative Works; + to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission the Work including as incorporated in Collective Works; + to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission Derivative Works. + + For the avoidance of doubt, where the work is a musical composition: + Performance Royalties Under Blanket Licenses. Licensor waives the exclusive right to collect, whether individually or via a performance rights society (e.g. ASCAP, BMI, SESAC), royalties for the public performance or public digital performance (e.g. webcast) of the Work. + Mechanical Rights and Statutory Royalties. Licensor waives the exclusive right to collect, whether individually or via a music rights agency or designated agent (e.g. Harry Fox Agency), royalties for any phonorecord You create from the Work ("cover version") and distribute, subject to the compulsory license created by 17 USC Section 115 of the US Copyright Act (or the equivalent in other jurisdictions). + Webcasting Rights and Statutory Royalties. For the avoidance of doubt, where the Work is a sound recording, Licensor waives the exclusive right to collect, whether individually or via a performance-rights society (e.g. SoundExchange), royalties for the public digital performance (e.g. webcast) of the Work, subject to the compulsory license created by 17 USC Section 114 of the US Copyright Act (or the equivalent in other jurisdictions). + +The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. All rights not expressly granted by Licensor are hereby reserved. + +4. 
Restrictions.The license granted in Section 3 above is expressly made subject to and limited by the following restrictions: + + You may distribute, publicly display, publicly perform, or publicly digitally perform the Work only under the terms of this License, and You must include a copy of, or the Uniform Resource Identifier for, this License with every copy or phonorecord of the Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Work that alter or restrict the terms of this License or the recipients' exercise of the rights granted hereunder. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties. You may not distribute, publicly display, publicly perform, or publicly digitally perform the Work with any technological measures that control access or use of the Work in a manner inconsistent with the terms of this License Agreement. The above applies to the Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Work itself to be made subject to the terms of this License. If You create a Collective Work, upon notice from any Licensor You must, to the extent practicable, remove from the Collective Work any credit as required by clause 4(b), as requested. If You create a Derivative Work, upon notice from any Licensor You must, to the extent practicable, remove from the Derivative Work any credit as required by clause 4(b), as requested. + If you distribute, publicly display, publicly perform, or publicly digitally perform the Work or any Derivative Works or Collective Works, You must keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or (ii) if the Original Author and/or Licensor designate another party or parties (e.g. a sponsor institute, publishing entity, journal) for attribution in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; the title of the Work if supplied; to the extent reasonably practicable, the Uniform Resource Identifier, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and in the case of a Derivative Work, a credit identifying the use of the Work in the Derivative Work (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). Such credit may be implemented in any reasonable manner; provided, however, that in the case of a Derivative Work or Collective Work, at a minimum such credit will appear where any other comparable authorship credit appears and in a manner at least as prominent as such other comparable authorship credit. + +5. Representations, Warranties and Disclaimer + +UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. 
SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. + +6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. Termination + + This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Derivative Works or Collective Works from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License. + Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above. + +8. Miscellaneous + + Each time You distribute or publicly digitally perform the Work or a Collective Work, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License. + Each time You distribute or publicly digitally perform a Derivative Work, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License. + If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. + No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent. + This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You. diff --git a/plugins/crypto-kms/licenses/httpcore-NOTICE.txt b/plugins/crypto-kms/licenses/httpcore-NOTICE.txt new file mode 100644 index 0000000000000..c0be50a505ec1 --- /dev/null +++ b/plugins/crypto-kms/licenses/httpcore-NOTICE.txt @@ -0,0 +1,8 @@ +Apache HttpComponents Core +Copyright 2005-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +This project contains annotations derived from JCIP-ANNOTATIONS +Copyright (c) 2005 Brian Goetz and Tim Peierls. 
See http://www.jcip.net diff --git a/plugins/crypto-kms/licenses/jackson-LICENSE b/plugins/crypto-kms/licenses/jackson-LICENSE new file mode 100644 index 0000000000000..f5f45d26a49d6 --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/plugins/crypto-kms/licenses/jackson-NOTICE b/plugins/crypto-kms/licenses/jackson-NOTICE new file mode 100644 index 0000000000000..4c976b7b4cc58 --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-NOTICE @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.15.2.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.15.2.jar.sha1 new file mode 100644 index 0000000000000..f63416ddb8ceb --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-annotations-2.15.2.jar.sha1 @@ -0,0 +1 @@ +4724a65ac8e8d156a24898d50fd5dbd3642870b8 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.15.2.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.15.2.jar.sha1 new file mode 100644 index 0000000000000..f16d80af8dce6 --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-databind-2.15.2.jar.sha1 @@ -0,0 +1 @@ +9353b021f10c307c00328f52090de2bdb4b6ff9c \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/json-utils-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/json-utils-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..a19b00e62f8b5 --- /dev/null +++ b/plugins/crypto-kms/licenses/json-utils-2.20.55.jar.sha1 @@ -0,0 +1 @@ +cd6710900e3190eac4c4496ae529ce08680dd320 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/json-utils-LICENSE.txt b/plugins/crypto-kms/licenses/json-utils-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/json-utils-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/crypto-kms/licenses/json-utils-NOTICE.txt b/plugins/crypto-kms/licenses/json-utils-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/json-utils-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/kms-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/kms-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..0b4e98f59a066 --- /dev/null +++ b/plugins/crypto-kms/licenses/kms-2.20.55.jar.sha1 @@ -0,0 +1 @@ +389780132dd417ab58e0bb9b269d738ff839605f \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/kms-LICENSE.txt b/plugins/crypto-kms/licenses/kms-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/kms-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/kms-NOTICE.txt b/plugins/crypto-kms/licenses/kms-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/kms-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/log4j-LICENSE.txt b/plugins/crypto-kms/licenses/log4j-LICENSE.txt new file mode 100644 index 0000000000000..6279e5206de13 --- /dev/null +++ b/plugins/crypto-kms/licenses/log4j-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2005 The Apache Software Foundation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/log4j-NOTICE.txt b/plugins/crypto-kms/licenses/log4j-NOTICE.txt new file mode 100644 index 0000000000000..0375732360047 --- /dev/null +++ b/plugins/crypto-kms/licenses/log4j-NOTICE.txt @@ -0,0 +1,5 @@ +Apache log4j +Copyright 2007 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/metrics-spi-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/metrics-spi-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..db6701d87892a --- /dev/null +++ b/plugins/crypto-kms/licenses/metrics-spi-2.20.55.jar.sha1 @@ -0,0 +1 @@ +8a0eae705b27465516f3b09cc9918e40963d534d \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/metrics-spi-LICENSE.txt b/plugins/crypto-kms/licenses/metrics-spi-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/metrics-spi-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/metrics-spi-NOTICE.txt b/plugins/crypto-kms/licenses/metrics-spi-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/metrics-spi-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/crypto-kms/licenses/profiles-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/profiles-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..b7104cf0939e6 --- /dev/null +++ b/plugins/crypto-kms/licenses/profiles-2.20.55.jar.sha1 @@ -0,0 +1 @@ +959aad08b2f24057bf286c761b49e3af31a0a623 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/profiles-LICENSE.txt b/plugins/crypto-kms/licenses/profiles-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/profiles-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/profiles-NOTICE.txt b/plugins/crypto-kms/licenses/profiles-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/profiles-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/protocol-core-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/protocol-core-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..4dee45f4d9dd3 --- /dev/null +++ b/plugins/crypto-kms/licenses/protocol-core-2.20.55.jar.sha1 @@ -0,0 +1 @@ +0935e3ab32962a890f1d13bf39ba2167d9d692f9 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/protocol-core-LICENSE.txt b/plugins/crypto-kms/licenses/protocol-core-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/protocol-core-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/crypto-kms/licenses/protocol-core-NOTICE.txt b/plugins/crypto-kms/licenses/protocol-core-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/protocol-core-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/reactive-streams-1.0.4.jar.sha1 b/plugins/crypto-kms/licenses/reactive-streams-1.0.4.jar.sha1 new file mode 100644 index 0000000000000..45a80e3f7e361 --- /dev/null +++ b/plugins/crypto-kms/licenses/reactive-streams-1.0.4.jar.sha1 @@ -0,0 +1 @@ +3864a1320d97d7b045f729a326e1e077661f31b7 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/reactive-streams-LICENSE.txt b/plugins/crypto-kms/licenses/reactive-streams-LICENSE.txt new file mode 100644 index 0000000000000..1e3c7e7c77495 --- /dev/null +++ b/plugins/crypto-kms/licenses/reactive-streams-LICENSE.txt @@ -0,0 +1,21 @@ +MIT No Attribution + +Copyright 2014 Reactive Streams + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-logs-NOTICE.txt b/plugins/crypto-kms/licenses/reactive-streams-NOTICE.txt similarity index 100% rename from plugins/telemetry-otel/licenses/opentelemetry-api-logs-NOTICE.txt rename to plugins/crypto-kms/licenses/reactive-streams-NOTICE.txt diff --git a/plugins/crypto-kms/licenses/regions-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/regions-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..993fc2f97de62 --- /dev/null +++ b/plugins/crypto-kms/licenses/regions-2.20.55.jar.sha1 @@ -0,0 +1 @@ +a117c19b4a30e902f4f1cc4bef6b5c10cc9aef31 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/regions-LICENSE.txt b/plugins/crypto-kms/licenses/regions-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/regions-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/crypto-kms/licenses/regions-NOTICE.txt b/plugins/crypto-kms/licenses/regions-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/regions-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/sdk-core-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/sdk-core-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..5f12be9c08c5b --- /dev/null +++ b/plugins/crypto-kms/licenses/sdk-core-2.20.55.jar.sha1 @@ -0,0 +1 @@ +8f2347feaf2575560ca89a2caa8d0243dbeb17a9 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/sdk-core-LICENSE.txt b/plugins/crypto-kms/licenses/sdk-core-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/sdk-core-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/sdk-core-NOTICE.txt b/plugins/crypto-kms/licenses/sdk-core-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/sdk-core-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/crypto-kms/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/crypto-kms/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/plugins/crypto-kms/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/slf4j-api-LICENSE.txt b/plugins/crypto-kms/licenses/slf4j-api-LICENSE.txt new file mode 100644 index 0000000000000..2be7689435062 --- /dev/null +++ b/plugins/crypto-kms/licenses/slf4j-api-LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) 2004-2022 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/slf4j-api-NOTICE.txt b/plugins/crypto-kms/licenses/slf4j-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..e7eebbb98f1fe --- /dev/null +++ b/plugins/crypto-kms/licenses/third-party-jackson-core-2.20.55.jar.sha1 @@ -0,0 +1 @@ +956912f26056fc7d46b2db566362fe5f7a8c0e14 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/third-party-jackson-core-LICENSE.txt b/plugins/crypto-kms/licenses/third-party-jackson-core-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/third-party-jackson-core-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/crypto-kms/licenses/third-party-jackson-core-NOTICE.txt b/plugins/crypto-kms/licenses/third-party-jackson-core-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/third-party-jackson-core-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). + + diff --git a/plugins/crypto-kms/licenses/utils-2.20.55.jar.sha1 b/plugins/crypto-kms/licenses/utils-2.20.55.jar.sha1 new file mode 100644 index 0000000000000..fc4cde604e33c --- /dev/null +++ b/plugins/crypto-kms/licenses/utils-2.20.55.jar.sha1 @@ -0,0 +1 @@ +d3e1bbbc19795eadbeb4dd963a94647576644097 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/utils-LICENSE.txt b/plugins/crypto-kms/licenses/utils-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/crypto-kms/licenses/utils-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/crypto-kms/licenses/utils-NOTICE.txt b/plugins/crypto-kms/licenses/utils-NOTICE.txt new file mode 100644 index 0000000000000..d2bc5723e9aea --- /dev/null +++ b/plugins/crypto-kms/licenses/utils-NOTICE.txt @@ -0,0 +1,14 @@ +OpenSearch (https://opensearch.org/) +Copyright OpenSearch Contributors + +This product includes software developed by +Elasticsearch (http://www.elastic.co). +Copyright 2009-2018 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). + +This product includes software developed by +Joda.org (http://www.joda.org/). 
+ + diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/AmazonKmsClientReference.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/AmazonKmsClientReference.java new file mode 100644 index 0000000000000..8cd07f681f9cd --- /dev/null +++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/AmazonKmsClientReference.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.crypto.kms; + +import software.amazon.awssdk.services.kms.KmsClient; + +import org.opensearch.common.concurrent.RefCountedReleasable; + +/** + * Handles the shutdown of the wrapped {@link KmsClient} using reference + * counting. + */ +public class AmazonKmsClientReference extends RefCountedReleasable<KmsClient> { + + AmazonKmsClientReference(KmsClient client) { + super("AWS_KMS_CLIENT", client, client::close); + } +} diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/CredentialProviderFactory.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/CredentialProviderFactory.java new file mode 100644 index 0000000000000..87a10f1408b8d --- /dev/null +++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/CredentialProviderFactory.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.crypto.kms; + +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.ContainerCredentialsProvider; +import software.amazon.awssdk.auth.credentials.InstanceProfileCredentialsProvider; +import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.SdkSystemSetting; + +import java.util.function.Supplier; + +/** + * Creates credential providers based on the provided configuration. + */ +public class CredentialProviderFactory { + + /** + * Credential provider for EC2/ECS container + */ + static class PrivilegedInstanceProfileCredentialsProvider implements AwsCredentialsProvider { + private final AwsCredentialsProvider credentials; + + private PrivilegedInstanceProfileCredentialsProvider() { + this.credentials = initializeProvider(); + } + + private AwsCredentialsProvider initializeProvider() { + if (SdkSystemSetting.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI.getStringValue().isPresent() + || SdkSystemSetting.AWS_CONTAINER_CREDENTIALS_FULL_URI.getStringValue().isPresent()) { + + return ContainerCredentialsProvider.builder().asyncCredentialUpdateEnabled(true).build(); + } + // InstanceProfileCredentialsProvider as last item of chain + return InstanceProfileCredentialsProvider.builder().asyncCredentialUpdateEnabled(true).build(); + } + + @Override + public AwsCredentials resolveCredentials() { + return SocketAccess.doPrivileged(credentials::resolveCredentials); + } + } + + /** + * Creates a credential provider based on the provided configuration. + * @param staticCredsSupplier Static credentials are used in case supplier returns a non-null instance. + * @return Credential provider instance.
+     */
+    public AwsCredentialsProvider createAwsCredentialsProvider(Supplier<AwsCredentials> staticCredsSupplier) {
+        AwsCredentials awsCredentials = staticCredsSupplier.get();
+        if (awsCredentials != null) {
+            return StaticCredentialsProvider.create(awsCredentials);
+        }
+
+        // Add other credential providers here
+        return new PrivilegedInstanceProfileCredentialsProvider();
+    }
+}
diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/CryptoKmsPlugin.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/CryptoKmsPlugin.java
new file mode 100644
index 0000000000000..f2ea9e37a0c09
--- /dev/null
+++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/CryptoKmsPlugin.java
@@ -0,0 +1,87 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.crypto.kms;
+
+import org.opensearch.SpecialPermission;
+import org.opensearch.cluster.metadata.CryptoMetadata;
+import org.opensearch.common.crypto.MasterKeyProvider;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.plugins.CryptoKeyProviderPlugin;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.plugins.ReloadablePlugin;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * AWS KMS based crypto key provider plugin.
+ */
+public class CryptoKmsPlugin extends Plugin implements CryptoKeyProviderPlugin, ReloadablePlugin {
+    private static final String PROVIDER_NAME = "aws-kms";
+
+    static {
+        SpecialPermission.check();
+    }
+
+    private final Settings settings;
+    // protected for testing
+    protected final KmsService kmsService;
+
+    public CryptoKmsPlugin(Settings settings) {
+        this(settings, new KmsService());
+    }
+
+    protected CryptoKmsPlugin(Settings settings, KmsService kmsService) {
+        this.settings = settings;
+        this.kmsService = kmsService;
+        // eagerly load client settings when secure settings are accessible
+        reload(settings);
+    }
+
+    @Override
+    public MasterKeyProvider createKeyProvider(CryptoMetadata cryptoMetadata) {
+        return kmsService.createMasterKeyProvider(cryptoMetadata);
+    }
+
+    @Override
+    public String type() {
+        return PROVIDER_NAME;
+    }
+
+    @Override
+    public List<Setting<?>> getSettings() {
+        return Arrays.asList(
+            KmsClientSettings.ACCESS_KEY_SETTING,
+            KmsClientSettings.SECRET_KEY_SETTING,
+            KmsClientSettings.SESSION_TOKEN_SETTING,
+            KmsClientSettings.ENDPOINT_SETTING,
+            KmsClientSettings.REGION_SETTING,
+            KmsClientSettings.PROXY_HOST_SETTING,
+            KmsClientSettings.PROXY_PORT_SETTING,
+            KmsClientSettings.PROXY_USERNAME_SETTING,
+            KmsClientSettings.PROXY_PASSWORD_SETTING,
+            KmsClientSettings.READ_TIMEOUT_SETTING,
+            KmsService.ENC_CTX_SETTING,
+            KmsService.KEY_ARN_SETTING
+        );
+    }
+
+    @Override
+    public void reload(Settings settings) {
+        // secure settings should be readable
+        final KmsClientSettings clientSettings = KmsClientSettings.getClientSettings(settings);
+        kmsService.refreshAndClearCache(clientSettings);
+    }
+
+    @Override
+    public void close() {
+        kmsService.close();
+    }
+}
diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsClientSettings.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsClientSettings.java
new file mode 100644
index 0000000000000..187a80a6355f7
--- /dev/null
+++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsClientSettings.java
@@ -0,0 +1,258 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.crypto.kms;
+
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentials;
+import software.amazon.awssdk.auth.credentials.AwsSessionCredentials;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.common.settings.SecureSetting;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Setting.Property;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.settings.SettingsException;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.common.settings.SecureString;
+
+import java.util.Locale;
+import java.util.Objects;
+
+/**
+ * A container for settings used to create a kms client.
+ */
+public class KmsClientSettings {
+
+    /** The access key (ie login id) for connecting to kms. */
+    static final Setting<SecureString> ACCESS_KEY_SETTING = SecureSetting.secureString("kms.access_key", null);
+
+    /** The secret key (ie password) for connecting to kms. */
+    static final Setting<SecureString> SECRET_KEY_SETTING = SecureSetting.secureString("kms.secret_key", null);
+
+    /** The session token for connecting to kms. */
+    static final Setting<SecureString> SESSION_TOKEN_SETTING = SecureSetting.secureString("kms.session_token", null);
+
+    /** The host name of a proxy to connect to kms through. */
+    static final Setting<String> PROXY_HOST_SETTING = Setting.simpleString("kms.proxy.host", Property.NodeScope);
+
+    /** The port of a proxy to connect to kms through. */
+    static final Setting<Integer> PROXY_PORT_SETTING = Setting.intSetting("kms.proxy.port", 80, 0, 1 << 16, Property.NodeScope);
+
+    /** An override for the kms endpoint to connect to. */
+    static final Setting<String> ENDPOINT_SETTING = new Setting<>("kms.endpoint", "", s -> s.toLowerCase(Locale.ROOT), Property.NodeScope);
+
+    /** An override for the scoping region for authentication. */
+    static final Setting<String> REGION_SETTING = new Setting<>("kms.region", "", s -> s.toLowerCase(Locale.ROOT), Property.NodeScope);
+
+    /** The username of a proxy to connect to kms through. */
+    static final Setting<SecureString> PROXY_USERNAME_SETTING = SecureSetting.secureString("kms.proxy.username", null);
+
+    /** The password of a proxy to connect to kms through. */
+    static final Setting<SecureString> PROXY_PASSWORD_SETTING = SecureSetting.secureString("kms.proxy.password", null);
+
+    /** The socket timeout for connecting to kms. */
+    static final Setting<TimeValue> READ_TIMEOUT_SETTING = Setting.timeSetting(
+        "kms.read_timeout",
+        TimeValue.timeValueMillis(50_000),
+        Property.NodeScope
+    );
+
+    private static final Logger logger = LogManager.getLogger(KmsClientSettings.class);
+
+    /** Credentials to authenticate with kms. */
+    final AwsCredentials credentials;
+
+    /**
+     * The kms endpoint the client should talk to, or empty string to use the
+     * default.
+     */
+    final String endpoint;
+
+    /**
+     * The kms signing region.
+     */
+    final String region;
+
+    /** An optional proxy host that requests to kms should be made through. */
+    final String proxyHost;
+
+    /** The port number the proxy host should be connected on.
+     */
+    final int proxyPort;
+
+    // these should be "secure" yet the api for the kms client only takes String, so storing them
+    // as SecureString here won't really help with anything
+    /** An optional username for the proxy host, for basic authentication. */
+    final String proxyUsername;
+
+    /** An optional password for the proxy host, for basic authentication. */
+    final String proxyPassword;
+
+    /** The read timeout for the kms client. */
+    final int readTimeoutMillis;
+
+    protected KmsClientSettings(
+        AwsCredentials credentials,
+        String endpoint,
+        String region,
+        String proxyHost,
+        int proxyPort,
+        String proxyUsername,
+        String proxyPassword,
+        int readTimeoutMillis
+    ) {
+        this.credentials = credentials;
+        this.endpoint = endpoint;
+        this.region = region;
+        this.proxyHost = proxyHost;
+        this.proxyPort = proxyPort;
+        this.proxyUsername = proxyUsername;
+        this.proxyPassword = proxyPassword;
+        this.readTimeoutMillis = readTimeoutMillis;
+    }
+
+    static AwsCredentials loadCredentials(Settings settings) {
+        try (
+            SecureString key = ACCESS_KEY_SETTING.get(settings);
+            SecureString secret = SECRET_KEY_SETTING.get(settings);
+            SecureString sessionToken = SESSION_TOKEN_SETTING.get(settings)
+        ) {
+            if (key.length() == 0 && secret.length() == 0) {
+                if (sessionToken.length() > 0) {
+                    throw new SettingsException(
+                        "Setting [{}] is set but [{}] and [{}] are not",
+                        SESSION_TOKEN_SETTING.getKey(),
+                        ACCESS_KEY_SETTING.getKey(),
+                        SECRET_KEY_SETTING.getKey()
+                    );
+                }
+
+                logger.debug("Using either environment variables, system properties or instance profile credentials");
+                return null;
+            } else {
+                if (key.length() == 0) {
+                    throw new SettingsException(
+                        "Setting [{}] is set but [{}] is not",
+                        SECRET_KEY_SETTING.getKey(),
+                        ACCESS_KEY_SETTING.getKey()
+                    );
+                }
+                if (secret.length() == 0) {
+                    throw new SettingsException(
+                        "Setting [{}] is set but [{}] is not",
+                        ACCESS_KEY_SETTING.getKey(),
+                        SECRET_KEY_SETTING.getKey()
+                    );
+                }
+
+                final AwsCredentials credentials;
+                if (sessionToken.length() == 0) {
+                    logger.debug("Using basic key/secret credentials");
+                    credentials = AwsBasicCredentials.create(key.toString(), secret.toString());
+                } else {
+                    logger.debug("Using basic session credentials");
+                    credentials = AwsSessionCredentials.create(key.toString(), secret.toString(), sessionToken.toString());
+                }
+                return credentials;
+            }
+        }
+    }
+
+    /** Parse settings for a single client. */
+    static KmsClientSettings getClientSettings(Settings settings) {
+        final AwsCredentials credentials = loadCredentials(settings);
+        try (
+            SecureString proxyUsername = PROXY_USERNAME_SETTING.get(settings);
+            SecureString proxyPassword = PROXY_PASSWORD_SETTING.get(settings)
+        ) {
+            return new KmsClientSettings(
+                credentials,
+                ENDPOINT_SETTING.get(settings),
+                REGION_SETTING.get(settings),
+                PROXY_HOST_SETTING.get(settings),
+                PROXY_PORT_SETTING.get(settings),
+                proxyUsername.toString(),
+                proxyPassword.toString(),
+                (int) READ_TIMEOUT_SETTING.get(settings).millis()
+            );
+        }
+    }
+
+    KmsClientSettings getMetadataSettings(Settings settings) {
+        AwsCredentials newCredentials = loadCredentials(settings);
+        newCredentials = newCredentials == null ? this.credentials : newCredentials;
+        final Settings normalizedSettings = Settings.builder().put(settings).normalizePrefix("kms.").build();
+
+        String newProxyUsername = this.proxyUsername, newProxyPassword = this.proxyPassword;
+        if (PROXY_USERNAME_SETTING.exists(normalizedSettings)) {
+            try (SecureString proxyUsername = PROXY_USERNAME_SETTING.get(normalizedSettings)) {
+                newProxyUsername = proxyUsername.toString();
+            }
+        }
+        if (PROXY_PASSWORD_SETTING.exists(normalizedSettings)) {
+            try (SecureString proxyPassword = PROXY_PASSWORD_SETTING.get(normalizedSettings)) {
+                newProxyPassword = proxyPassword.toString();
+            }
+        }
+
+        String newEndpoint = getCryptoMetadataSettingOrExisting(ENDPOINT_SETTING, normalizedSettings, this.endpoint);
+        String newRegion = getCryptoMetadataSettingOrExisting(REGION_SETTING, normalizedSettings, this.region);
+        String newProxyHost = getCryptoMetadataSettingOrExisting(PROXY_HOST_SETTING, normalizedSettings, this.proxyHost);
+        int newProxyPort = getCryptoMetadataSettingOrExisting(PROXY_PORT_SETTING, normalizedSettings, this.proxyPort);
+        TimeValue newReadTimeout = getCryptoMetadataSettingOrExisting(
+            READ_TIMEOUT_SETTING,
+            normalizedSettings,
+            TimeValue.timeValueMillis(this.readTimeoutMillis)
+        );
+
+        return new KmsClientSettings(
+            newCredentials,
+            newEndpoint,
+            newRegion,
+            newProxyHost,
+            newProxyPort,
+            newProxyUsername,
+            newProxyPassword,
+            (int) newReadTimeout.millis()
+        );
+    }
+
+    private static <T> T getCryptoMetadataSettingOrExisting(Setting<T> setting, Settings normalizedSettings, T defaultValue) {
+        if (setting.exists(normalizedSettings)) {
+            return setting.get(normalizedSettings);
+        }
+        return defaultValue;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        final KmsClientSettings that = (KmsClientSettings) o;
+        return readTimeoutMillis == that.readTimeoutMillis
+            && Objects.equals(credentials, that.credentials)
+            && Objects.equals(endpoint, that.endpoint)
+            && Objects.equals(region, that.region)
+            && Objects.equals(proxyHost, that.proxyHost)
+            && Objects.equals(proxyPort, that.proxyPort)
+            && Objects.equals(proxyUsername, that.proxyUsername)
+            && Objects.equals(proxyPassword, that.proxyPassword);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(readTimeoutMillis, credentials, endpoint, region, proxyHost, proxyPort, proxyUsername, proxyPassword);
+    }
+}
diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsMasterKeyProvider.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsMasterKeyProvider.java
new file mode 100644
index 0000000000000..9003e8bebd5ff
--- /dev/null
+++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsMasterKeyProvider.java
@@ -0,0 +1,80 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.crypto.kms;
+
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.services.kms.model.DataKeySpec;
+import software.amazon.awssdk.services.kms.model.DecryptRequest;
+import software.amazon.awssdk.services.kms.model.DecryptResponse;
+import software.amazon.awssdk.services.kms.model.GenerateDataKeyRequest;
+import software.amazon.awssdk.services.kms.model.GenerateDataKeyResponse;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.common.crypto.DataKeyPair;
+import org.opensearch.common.crypto.MasterKeyProvider;
+
+import java.util.Map;
+import java.util.function.Supplier;
+
+public class KmsMasterKeyProvider implements MasterKeyProvider {
+    private final Map<String, String> encryptionContext;
+    private final String keyArn;
+    private final Supplier<AmazonKmsClientReference> clientReferenceSupplier;
+
+    private static final Logger logger = LogManager.getLogger(KmsMasterKeyProvider.class);
+
+    public KmsMasterKeyProvider(
+        Map<String, String> encryptionContext,
+        String keyArn,
+        Supplier<AmazonKmsClientReference> clientReferenceSupplier
+    ) {
+        this.encryptionContext = encryptionContext;
+        this.keyArn = keyArn;
+        this.clientReferenceSupplier = clientReferenceSupplier;
+    }
+
+    @Override
+    public DataKeyPair generateDataPair() {
+        logger.info("Generating new data key pair");
+        try (AmazonKmsClientReference clientReference = clientReferenceSupplier.get()) {
+            GenerateDataKeyRequest request = GenerateDataKeyRequest.builder()
+                .encryptionContext(encryptionContext)
+                // Currently only 32 byte data key is supported. To add support for other key sizes add key providers
+                // in org.opensearch.encryption.CryptoManagerFactory.createCryptoProvider.
+                .keySpec(DataKeySpec.AES_256)
+                .keyId(keyArn)
+                .build();
+            GenerateDataKeyResponse dataKeyPair = SocketAccess.doPrivileged(() -> clientReference.get().generateDataKey(request));
+            return new DataKeyPair(dataKeyPair.plaintext().asByteArray(), dataKeyPair.ciphertextBlob().asByteArray());
+        }
+    }
+
+    @Override
+    public byte[] decryptKey(byte[] encryptedKey) {
+        try (AmazonKmsClientReference clientReference = clientReferenceSupplier.get()) {
+            DecryptRequest decryptRequest = DecryptRequest.builder().ciphertextBlob(SdkBytes.fromByteArray(encryptedKey)).build();
+            DecryptResponse decryptResponse = SocketAccess.doPrivileged(() -> clientReference.get().decrypt(decryptRequest));
+            return decryptResponse.plaintext().asByteArray();
+        }
+    }
+
+    @Override
+    public String getKeyId() {
+        return keyArn;
+    }
+
+    @Override
+    public Map<String, String> getEncryptionContext() {
+        return encryptionContext;
+    }
+
+    @Override
+    public void close() {}
+}
diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsService.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsService.java
new file mode 100644
index 0000000000000..108c88bd3bf80
--- /dev/null
+++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/KmsService.java
@@ -0,0 +1,272 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.crypto.kms;
+
+import software.amazon.awssdk.auth.credentials.AwsCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
+import software.amazon.awssdk.core.exception.SdkException;
+import software.amazon.awssdk.core.retry.RetryPolicy;
+import software.amazon.awssdk.http.apache.ApacheHttpClient;
+import software.amazon.awssdk.http.apache.ProxyConfiguration;
+import software.amazon.awssdk.profiles.ProfileFileSystemSetting;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.kms.KmsClient;
+import software.amazon.awssdk.services.kms.KmsClientBuilder;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.cluster.metadata.CryptoMetadata;
+import org.opensearch.common.SuppressForbidden;
+import org.opensearch.common.collect.MapBuilder;
+import org.opensearch.common.crypto.MasterKeyProvider;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.common.Strings;
+
+import java.io.Closeable;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.time.Duration;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static java.util.Collections.emptyMap;
+
+/**
+ * Service class which exposes APIs for communication with AWS KMS.
+ */
+public class KmsService implements Closeable {
+
+    private static final Logger logger = LogManager.getLogger(KmsService.class);
+    private final CredentialProviderFactory credentialProviderFactory;
+
+    static final Setting<String> ENC_CTX_SETTING = Setting.simpleString("kms.encryption_context", Setting.Property.NodeScope);
+
+    static final Setting<String> KEY_ARN_SETTING = Setting.simpleString("kms.key_arn", Setting.Property.NodeScope);
+
+    private volatile Map<KmsClientSettings, AmazonKmsClientReference> clientsCache = emptyMap();
+
+    /**
+     * Client settings calculated from static configuration and settings in the keystore.
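+     * These are refreshed via {@link #refreshAndClearCache(KmsClientSettings)} whenever the plugin reloads secure settings.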
+     */
+    private volatile KmsClientSettings staticClientSettings;
+
+    /**
+     * Client settings derived from those in {@link #staticClientSettings} by combining them with crypto settings
+     */
+    private volatile Map<Settings, KmsClientSettings> derivedClientSettings;
+
+    public KmsService() {
+        credentialProviderFactory = new CredentialProviderFactory();
+    }
+
+    private KmsClient buildClient(KmsClientSettings clientSettings) {
+        SocketAccess.doPrivilegedVoid(KmsService::setDefaultAwsProfilePath);
+        final AwsCredentialsProvider awsCredentialsProvider = buildCredentials(clientSettings);
+        final ClientOverrideConfiguration overrideConfiguration = buildOverrideConfiguration();
+        final ProxyConfiguration proxyConfiguration = SocketAccess.doPrivileged(() -> buildProxyConfiguration(clientSettings));
+        return buildClient(
+            awsCredentialsProvider,
+            proxyConfiguration,
+            overrideConfiguration,
+            clientSettings.endpoint,
+            clientSettings.region,
+            clientSettings.readTimeoutMillis
+        );
+    }
+
+    // proxy for testing
+    protected KmsClient buildClient(
+        AwsCredentialsProvider awsCredentialsProvider,
+        ProxyConfiguration proxyConfiguration,
+        ClientOverrideConfiguration overrideConfiguration,
+        String endpoint,
+        String region,
+        long readTimeoutMillis
+    ) {
+        ApacheHttpClient.Builder clientBuilder = ApacheHttpClient.builder()
+            .proxyConfiguration(proxyConfiguration)
+            .socketTimeout(Duration.ofMillis(readTimeoutMillis));
+
+        KmsClientBuilder builder = KmsClient.builder()
+            .region(Region.of(region))
+            .overrideConfiguration(overrideConfiguration)
+            .httpClientBuilder(clientBuilder)
+            .credentialsProvider(awsCredentialsProvider);
+
+        if (Strings.hasText(endpoint)) {
+            logger.debug("using explicit kms endpoint [{}]", endpoint);
+            builder.endpointOverride(URI.create(endpoint));
+        }
+
+        if (Strings.hasText(region)) {
+            logger.debug("using explicit kms region [{}]", region);
+            builder.region(Region.of(region));
+        }
+
+        return SocketAccess.doPrivileged(builder::build);
+    }
+
+    ProxyConfiguration buildProxyConfiguration(KmsClientSettings clientSettings) {
+        if (Strings.hasText(clientSettings.proxyHost)) {
+            try {
+                return ProxyConfiguration.builder()
+                    .endpoint(new URI("https", null, clientSettings.proxyHost, clientSettings.proxyPort, null, null, null))
+                    .username(clientSettings.proxyUsername)
+                    .password(clientSettings.proxyPassword)
+                    .build();
+            } catch (URISyntaxException e) {
+                throw SdkException.create("Invalid proxy URL", e);
+            }
+        } else {
+            return ProxyConfiguration.builder().build();
+        }
+    }
+
+    ClientOverrideConfiguration buildOverrideConfiguration() {
+        return ClientOverrideConfiguration.builder().retryPolicy(buildRetryPolicy()).build();
+    }
+
+    // pkg private for tests
+    RetryPolicy buildRetryPolicy() {
+        // Increase the number of retries in case of 5xx API responses.
+        // Note that AWS SDK v2 introduced a concept of TokenBucketRetryCondition, which effectively limits retries for
+        // APIs that have been failing continuously. It allocates tokens (default is 500), which means that once 500
+        // retries fail for any API on a bucket, new retries will only be allowed once some retries are rejected.
+        // https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/core/retry/conditions/TokenBucketRetryCondition.html
+        RetryPolicy.Builder retryPolicy = RetryPolicy.builder().numRetries(10);
+        return retryPolicy.build();
+    }
+
+    AwsCredentialsProvider buildCredentials(KmsClientSettings clientSettings) {
+        final AwsCredentials credentials = clientSettings.credentials;
+        return credentialProviderFactory.createAwsCredentialsProvider(() -> credentials);
+    }
+
+    public AmazonKmsClientReference client(CryptoMetadata cryptoMetadata) {
+        final KmsClientSettings clientSettings = settings(cryptoMetadata);
+        {
+            final AmazonKmsClientReference clientReference = clientsCache.get(clientSettings);
+            if (clientReference != null && clientReference.tryIncRef()) {
+                return clientReference;
+            }
+        }
+        synchronized (this) {
+            final AmazonKmsClientReference existing = clientsCache.get(clientSettings);
+            if (existing != null && existing.tryIncRef()) {
+                return existing;
+            }
+            final AmazonKmsClientReference clientReference = new AmazonKmsClientReference(
+                SocketAccess.doPrivileged(() -> buildClient(clientSettings))
+            );
+            clientReference.incRef();
+            clientsCache = MapBuilder.newMapBuilder(clientsCache).put(clientSettings, clientReference).immutableMap();
+            return clientReference;
+        }
+    }
+
+    /**
+     * Either fetches {@link KmsClientSettings} for a given {@link CryptoMetadata} from cached settings or creates them
+     * by overriding static client settings from {@link #staticClientSettings} with settings found in the crypto metadata.
+     * @param cryptoMetadata Crypto Metadata
+     * @return KmsClientSettings
+     */
+    KmsClientSettings settings(CryptoMetadata cryptoMetadata) {
+        final Settings settings = cryptoMetadata.settings();
+        {
+            final KmsClientSettings existing = derivedClientSettings.get(settings);
+            if (existing != null) {
+                return existing;
+            }
+        }
+        synchronized (this) {
+            final KmsClientSettings existing = derivedClientSettings.get(settings);
+            if (existing != null) {
+                return existing;
+            }
+            final KmsClientSettings newSettings = staticClientSettings.getMetadataSettings(settings);
+            derivedClientSettings = MapBuilder.newMapBuilder(derivedClientSettings).put(settings, newSettings).immutableMap();
+            return newSettings;
+        }
+    }
+
+    /**
+     * Refreshes the settings for the AmazonKMS client. The new client will be built
+     * using these new settings. The old client remains usable until released; on release it
+     * is destroyed instead of being returned to the cache.
+     */
+    public void refreshAndClearCache(KmsClientSettings clientSettings) {
+        // shutdown all unused clients
+        // others will shut down on their respective release
+        releaseCachedClients();
+        this.staticClientSettings = clientSettings;
+        derivedClientSettings = emptyMap();
+    }
+
+    private synchronized void releaseCachedClients() {
+        // the clients will shut down once they are no longer used
+        for (final AmazonKmsClientReference clientReference : clientsCache.values()) {
+            clientReference.decRef();
+        }
+
+        // clear previously cached clients, they will be rebuilt lazily
+        clientsCache = emptyMap();
+        derivedClientSettings = emptyMap();
+    }
+
+    @Override
+    public void close() {
+        releaseCachedClients();
+    }
+
+    // By default, AWS v2 SDK loads a default profile from $USER_HOME, which is restricted. Use the OpenSearch configuration path instead.
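+    // For example, with -Dopensearch.path.conf=/etc/opensearch (illustrative path, not part of this change)
+    // both aws.sharedCredentialsFile and aws.configFile end up pointing at /etc/opensearch.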
+    @SuppressForbidden(reason = "Prevent AWS SDK v2 from using ~/.aws/config and ~/.aws/credentials.")
+    static void setDefaultAwsProfilePath() {
+        if (ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE.getStringValue().isEmpty()) {
+            logger.info("setting aws.sharedCredentialsFile={}", System.getProperty("opensearch.path.conf"));
+            System.setProperty(ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE.property(), System.getProperty("opensearch.path.conf"));
+        }
+        if (ProfileFileSystemSetting.AWS_CONFIG_FILE.getStringValue().isEmpty()) {
+            logger.info("setting aws.configFile={}", System.getProperty("opensearch.path.conf"));
+            System.setProperty(ProfileFileSystemSetting.AWS_CONFIG_FILE.property(), System.getProperty("opensearch.path.conf"));
+        }
+    }
+
+    public MasterKeyProvider createMasterKeyProvider(CryptoMetadata cryptoMetadata) {
+        Settings cryptoSettings = Settings.builder().put(cryptoMetadata.settings()).normalizePrefix("kms.").build();
+        String keyArn = KEY_ARN_SETTING.get(cryptoSettings);
+        if (!Strings.hasText(keyArn)) {
+            throw new IllegalArgumentException("Missing key_arn setting");
+        }
+
+        String kmsEncCtx = ENC_CTX_SETTING.get(cryptoSettings);
+        Map<String, String> encCtx;
+        if (Strings.hasText(kmsEncCtx)) {
+            try {
+                encCtx = Arrays.stream(kmsEncCtx.split(","))
+                    .map(s -> s.split("="))
+                    .collect(Collectors.toMap(e -> e[0].trim(), e -> e[1].trim()));
+            } catch (Exception ex) {
+                throw new IllegalArgumentException(
+                    "Invalid format for setting [" + ENC_CTX_SETTING.getKey() + "]. Format should be: Name1=Value1, Name2=Value2"
+                );
+            }
+        } else {
+            encCtx = new HashMap<>();
+        }
+
+        // Verify client creation is successful to detect any failure early.
+        try (AmazonKmsClientReference clientReference = client(cryptoMetadata)) {
+            clientReference.get();
+        }
+
+        return new KmsMasterKeyProvider(encCtx, keyArn, () -> client(cryptoMetadata));
+    }
+}
diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/SocketAccess.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/SocketAccess.java
new file mode 100644
index 0000000000000..5b026c30017ca
--- /dev/null
+++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/SocketAccess.java
@@ -0,0 +1,38 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.crypto.kms;
+
+import org.opensearch.SpecialPermission;
+
+import java.net.SocketPermission;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+
+/**
+ * This plugin uses AWS libraries to connect to AWS services. For these remote calls the plugin needs
+ * {@link SocketPermission} 'connect' to establish connections. This class wraps the operations requiring access in
+ * {@link AccessController#doPrivileged(PrivilegedAction)} blocks.
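+ * The matching {@code java.net.SocketPermission "*", "connect,resolve"} grant lives in this plugin's
+ * plugin-security.policy file.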
+ */
+public final class SocketAccess {
+
+    private SocketAccess() {}
+
+    public static <T> T doPrivileged(PrivilegedAction<T> operation) {
+        SpecialPermission.check();
+        return AccessController.doPrivileged(operation);
+    }
+
+    public static void doPrivilegedVoid(Runnable action) {
+        SpecialPermission.check();
+        AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
+            action.run();
+            return null;
+        });
+    }
+}
diff --git a/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/package-info.java b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/package-info.java
new file mode 100644
index 0000000000000..787adc32d8941
--- /dev/null
+++ b/plugins/crypto-kms/src/main/java/org/opensearch/crypto/kms/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Crypto plugin for encryption and decryption use cases.
+ */
+package org.opensearch.crypto.kms;
diff --git a/plugins/crypto-kms/src/main/plugin-metadata/plugin-security.policy b/plugins/crypto-kms/src/main/plugin-metadata/plugin-security.policy
new file mode 100644
index 0000000000000..46fb79010f9ef
--- /dev/null
+++ b/plugins/crypto-kms/src/main/plugin-metadata/plugin-security.policy
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+grant {
+  // needed because of problems in ClientConfiguration
+  // TODO: get these fixed in aws sdk
+  permission java.lang.RuntimePermission "accessDeclaredMembers";
+  permission java.lang.RuntimePermission "getClassLoader";
+  permission java.lang.RuntimePermission "setContextClassLoader";
+  // Needed because of problems in kms client
+  // When no region is set on a kms client instance, the
+  // AWS SDK loads all known partitions from a JSON file and
+  // uses Jackson's ObjectMapper for that: this one, in
+  // version 2.5.3 with the default binding options, tries
+  // to suppress access checks of ctor/field/method and thus
+  // requires this special permission. AWS must be fixed to
+  // use Jackson correctly and have the correct modifiers
+  // on bound classes.
+  // TODO: get these fixed in aws sdk
+  // See https://github.com/aws/aws-sdk-java/issues/766
+  permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
+
+  permission java.lang.RuntimePermission "getClassLoader";
+
+  // kms client opens socket connections to kms
+  permission java.net.SocketPermission "*", "connect,resolve";
+
+  // kms client sets Authenticator for proxy username/password
+  permission java.net.NetPermission "setDefaultAuthenticator";
+
+  permission java.util.PropertyPermission "aws.sharedCredentialsFile", "read,write";
+  permission java.util.PropertyPermission "aws.configFile", "read,write";
+  permission java.util.PropertyPermission "aws.region", "read,write";
+  permission java.util.PropertyPermission "aws.accessKeyId", "read,write";
+  permission java.util.PropertyPermission "aws.secretAccessKey", "read,write";
+  permission java.util.PropertyPermission "opensearch.path.conf", "read,write";
+
+  permission java.io.FilePermission "config", "read";
+};
diff --git a/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/AbstractAwsTestCase.java b/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/AbstractAwsTestCase.java
new file mode 100644
index 0000000000000..3fe49f9d3b523
--- /dev/null
+++ b/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/AbstractAwsTestCase.java
@@ -0,0 +1,72 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.crypto.kms;
+
+import software.amazon.awssdk.profiles.ProfileFileSystemSetting;
+
+import org.opensearch.common.SuppressForbidden;
+import org.opensearch.common.io.PathUtils;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.nio.file.Path;
+
+public abstract class AbstractAwsTestCase extends OpenSearchTestCase {
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        setUpAwsProfile();
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        resetAwsProfile();
+        super.tearDown();
+    }
+
+    private Path configPath() {
+        return PathUtils.get("config");
+    }
+
+    private String previousOpenSearchPathConf;
+    private String awsRegion;
+    private String awsAccessKeyId;
+    private String awsSecretAccessKey;
+    private String awsSharedCredentialsFile;
+    private String awsConfigFile;
+
+    @SuppressForbidden(reason = "set predictable aws defaults")
+    private void setUpAwsProfile() throws Exception {
+        previousOpenSearchPathConf = SocketAccess.doPrivileged(() -> System.setProperty("opensearch.path.conf", configPath().toString()));
+        awsRegion = SocketAccess.doPrivileged(() -> System.setProperty("aws.region", "us-west-2"));
+        awsAccessKeyId = SocketAccess.doPrivileged(() -> System.setProperty("aws.accessKeyId", "aws-access-key-id"));
+        awsSecretAccessKey = SocketAccess.doPrivileged(() -> System.setProperty("aws.secretAccessKey", "aws-secret-access-key"));
+        awsSharedCredentialsFile = System.getProperty(ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE.property());
+        awsConfigFile = System.getProperty(ProfileFileSystemSetting.AWS_CONFIG_FILE.property());
+        SocketAccess.doPrivilegedVoid(KmsService::setDefaultAwsProfilePath);
+    }
+
+    @SuppressForbidden(reason = "reset aws settings")
+    private void resetAwsProfile() throws Exception {
+        resetPropertyValue("opensearch.path.conf", previousOpenSearchPathConf);
+        resetPropertyValue("aws.region", awsRegion);
+        resetPropertyValue("aws.accessKeyId", awsAccessKeyId);
+
resetPropertyValue("aws.secretAccessKey", awsSecretAccessKey); + resetPropertyValue(ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE.property(), awsSharedCredentialsFile); + resetPropertyValue(ProfileFileSystemSetting.AWS_CONFIG_FILE.property(), awsConfigFile); + } + + @SuppressForbidden(reason = "reset aws settings") + private void resetPropertyValue(String key, String value) { + if (value != null) { + SocketAccess.doPrivileged(() -> System.setProperty(key, value)); + } else { + SocketAccess.doPrivileged(() -> System.clearProperty(key)); + } + } +} diff --git a/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/CryptoKmsClientSettingsTests.java b/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/CryptoKmsClientSettingsTests.java new file mode 100644 index 0000000000000..842d85faaa677 --- /dev/null +++ b/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/CryptoKmsClientSettingsTests.java @@ -0,0 +1,164 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.crypto.kms; + +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; +import software.amazon.awssdk.regions.Region; + +import org.opensearch.common.settings.MockSecureSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.settings.SettingsException; +import org.opensearch.common.unit.TimeValue; + +import java.io.IOException; + +public class CryptoKmsClientSettingsTests extends AbstractAwsTestCase { + + public void testNondefaultClientCreatedBySettingItsSettings() { + final KmsClientSettings settings = KmsClientSettings.getClientSettings( + Settings.builder().put("kms.endpoint", "custom_endpoint").build() + ); + + assertEquals(settings.endpoint, "custom_endpoint"); + // Check if defaults are still present + assertNotNull(settings.proxyHost); + } + + public void testRejectionOfLoneAccessKey() throws IOException { + try (final MockSecureSettings secureSettings = new MockSecureSettings()) { + secureSettings.setString("kms.access_key", "aws_secret"); + final SettingsException e = expectThrows( + SettingsException.class, + () -> KmsClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build()) + ); + assertTrue(e.getMessage().contains("Setting [kms.access_key] is set but [kms.secret_key] is not")); + } + } + + public void testRejectionOfLoneSecretKey() throws IOException { + try (final MockSecureSettings secureSettings = new MockSecureSettings()) { + secureSettings.setString("kms.secret_key", "aws_key"); + final SettingsException e = expectThrows( + SettingsException.class, + () -> KmsClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build()) + ); + assertTrue(e.getMessage().contains("Setting [kms.secret_key] is set but [kms.access_key] is not")); + } + } + + public void testRejectionOfLoneSessionToken() throws IOException { + try (final MockSecureSettings secureSettings = new MockSecureSettings()) { + secureSettings.setString("kms.session_token", "aws_session_token"); + final SettingsException e = expectThrows( + SettingsException.class, + () -> KmsClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build()) + ); + assertTrue(e.getMessage().contains("Setting [kms.session_token] is set but [kms.access_key] and [kms.secret_key] are not")); + } + } + + public void testDefaultEndpoint() 
{ + KmsClientSettings baseSettings = KmsClientSettings.getClientSettings(Settings.EMPTY); + assertEquals(baseSettings.endpoint, ""); + } + + public void testDefaultRegion() { + final Settings settings = Settings.builder().build(); + KmsClientSettings baseSettings = KmsClientSettings.getClientSettings(settings); + assertEquals(baseSettings.region, ""); + } + + public void testSpecificRegion() { + final Settings settings = Settings.builder().put(KmsClientSettings.REGION_SETTING.getKey(), "us-west-2").build(); + KmsClientSettings baseSettings = KmsClientSettings.getClientSettings(settings); + assertEquals(baseSettings.region, Region.US_WEST_2.toString()); + } + + public void testSpecificEndpoint() { + final Settings settings = Settings.builder().put(KmsClientSettings.ENDPOINT_SETTING.getKey(), "kms.endpoint").build(); + KmsClientSettings baseSettings = KmsClientSettings.getClientSettings(settings); + assertEquals(baseSettings.endpoint, "kms.endpoint"); + } + + public void testOverrideWithPrefixedMetadataSettings() { + overrideWithMetadataSettings("kms."); + } + + public void testOverrideWithNoPrefixMetadataSettings() { + overrideWithMetadataSettings(""); + } + + public void overrideWithMetadataSettings(String prefix) { + final MockSecureSettings secureSettings = new MockSecureSettings(); + String accessKey = "access_key", secretKey = "secret_key", sessionToken = "session_token"; + secureSettings.setString("kms.access_key", accessKey); + secureSettings.setString("kms.secret_key", secretKey); + secureSettings.setString("kms.session_token", sessionToken); + final KmsClientSettings baseSettings = KmsClientSettings.getClientSettings( + Settings.builder().setSecureSettings(secureSettings).build() + ); + + { + final KmsClientSettings refinedSettings = baseSettings.getMetadataSettings(Settings.EMPTY); + assertEquals(refinedSettings, baseSettings); + } + + { + final String endpoint = "some.host"; + final KmsClientSettings refinedSettings = baseSettings.getMetadataSettings( + Settings.builder().put(prefix + "endpoint", endpoint).build() + ); + assertEquals(refinedSettings.endpoint, endpoint); + validateCredsAreStillSame(refinedSettings, accessKey, secretKey, sessionToken); + } + + { + String region = "eu-west-1"; + final KmsClientSettings refinedSettings = baseSettings.getMetadataSettings( + Settings.builder().put(prefix + "region", region).build() + ); + assertEquals(refinedSettings.region, region); + validateCredsAreStillSame(refinedSettings, accessKey, secretKey, sessionToken); + } + + { + String proxyHost = "proxy-host"; + final KmsClientSettings refinedSettings = baseSettings.getMetadataSettings( + Settings.builder().put(prefix + "proxy.host", proxyHost).build() + ); + assertEquals(refinedSettings.proxyHost, proxyHost); + validateCredsAreStillSame(refinedSettings, accessKey, secretKey, sessionToken); + } + + { + int proxyPort = 70; + final KmsClientSettings refinedSettings = baseSettings.getMetadataSettings( + Settings.builder().put(prefix + "proxy.port", proxyPort).build() + ); + assertEquals(refinedSettings.proxyPort, proxyPort); + validateCredsAreStillSame(refinedSettings, accessKey, secretKey, sessionToken); + } + + { + TimeValue readTimeout = TimeValue.timeValueMillis(5000); + final KmsClientSettings refinedSettings = baseSettings.getMetadataSettings( + Settings.builder().put(prefix + "read_timeout", readTimeout).build() + ); + assertEquals(refinedSettings.readTimeoutMillis, readTimeout.getMillis()); + validateCredsAreStillSame(refinedSettings, accessKey, secretKey, sessionToken); + } + } + + 
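+    // Outside of tests the secure settings exercised above come from the OpenSearch keystore rather
+    // than MockSecureSettings, e.g. (illustrative commands, not part of this change):
+    //   bin/opensearch-keystore add kms.access_key
+    //   bin/opensearch-keystore add kms.secret_key
+    //   bin/opensearch-keystore add kms.session_token
+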
private void validateCredsAreStillSame(KmsClientSettings refinedSettings, String accessKey, String secretKey, String sessionToken) { + AwsSessionCredentials credentials = (AwsSessionCredentials) refinedSettings.credentials; + assertEquals(credentials.accessKeyId(), accessKey); + assertEquals(credentials.secretAccessKey(), secretKey); + assertEquals(credentials.sessionToken(), sessionToken); + } +} diff --git a/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/KmsServiceTests.java b/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/KmsServiceTests.java new file mode 100644 index 0000000000000..1424cce473592 --- /dev/null +++ b/plugins/crypto-kms/src/test/java/org/opensearch/crypto/kms/KmsServiceTests.java @@ -0,0 +1,255 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.crypto.kms; + +import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; +import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration; +import software.amazon.awssdk.core.retry.RetryPolicy; +import software.amazon.awssdk.http.apache.ProxyConfiguration; +import software.amazon.awssdk.services.kms.KmsClient; + +import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.common.settings.MockSecureSettings; +import org.opensearch.common.settings.Settings; + +public class KmsServiceTests extends AbstractAwsTestCase { + private final CryptoMetadata cryptoMetadata = new CryptoMetadata("kp1", "kp2", Settings.EMPTY); + + public void testAWSDefaultConfiguration() { + try (KmsService kmsService = new KmsService()) { + // proxy configuration + final ProxyConfiguration proxyConfiguration = kmsService.buildProxyConfiguration( + KmsClientSettings.getClientSettings(Settings.EMPTY) + ); + + assertNull(proxyConfiguration.scheme()); + assertNull(proxyConfiguration.host()); + assertEquals(proxyConfiguration.port(), 0); + assertNull(proxyConfiguration.username()); + assertNull(proxyConfiguration.password()); + + // retry policy + RetryPolicy retryPolicyConfiguration = SocketAccess.doPrivileged(kmsService::buildRetryPolicy); + + assertEquals(retryPolicyConfiguration.numRetries().intValue(), 10); + + ClientOverrideConfiguration clientOverrideConfiguration = SocketAccess.doPrivileged(kmsService::buildOverrideConfiguration); + assertTrue(clientOverrideConfiguration.retryPolicy().isPresent()); + assertEquals(clientOverrideConfiguration.retryPolicy().get().numRetries().intValue(), 10); + } + } + + public void testAWSConfigurationWithAwsSettings() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("kms.proxy.username", "aws_proxy_username"); + secureSettings.setString("kms.proxy.password", "aws_proxy_password"); + + final Settings settings = Settings.builder() + // NOTE: a host cannot contain the _ character when parsed by URI, hence aws-proxy-host and not aws_proxy_host + .put("kms.proxy.host", "aws-proxy-host") + .put("kms.proxy.port", 8080) + .put("kms.read_timeout", "10s") + .setSecureSettings(secureSettings) + .build(); + + try (KmsService kmsService = new KmsService()) { + // proxy configuration + final ProxyConfiguration proxyConfiguration = 
SocketAccess.doPrivileged( + () -> kmsService.buildProxyConfiguration(KmsClientSettings.getClientSettings(settings)) + ); + + assertEquals(proxyConfiguration.host(), "aws-proxy-host"); + assertEquals(proxyConfiguration.port(), 8080); + assertEquals(proxyConfiguration.username(), "aws_proxy_username"); + assertEquals(proxyConfiguration.password(), "aws_proxy_password"); + + // retry policy + RetryPolicy retryPolicyConfiguration = SocketAccess.doPrivileged(kmsService::buildRetryPolicy); + assertEquals(retryPolicyConfiguration.numRetries().intValue(), 10); + + ClientOverrideConfiguration clientOverrideConfiguration = SocketAccess.doPrivileged(kmsService::buildOverrideConfiguration); + assertTrue(clientOverrideConfiguration.retryPolicy().isPresent()); + assertEquals(clientOverrideConfiguration.retryPolicy().get().numRetries().intValue(), 10); + } + } + + public void testClientSettingsReInit() { + final MockSecureSettings mockSecure1 = new MockSecureSettings(); + mockSecure1.setString(KmsClientSettings.ACCESS_KEY_SETTING.getKey(), "kms_access_1"); + mockSecure1.setString(KmsClientSettings.SECRET_KEY_SETTING.getKey(), "kms_secret_1"); + final boolean mockSecure1HasSessionToken = randomBoolean(); + if (mockSecure1HasSessionToken) { + mockSecure1.setString(KmsClientSettings.SESSION_TOKEN_SETTING.getKey(), "kms_session_token_1"); + } + mockSecure1.setString(KmsClientSettings.PROXY_USERNAME_SETTING.getKey(), "proxy_username_1"); + mockSecure1.setString(KmsClientSettings.PROXY_PASSWORD_SETTING.getKey(), "proxy_password_1"); + final Settings settings1 = Settings.builder() + .put(KmsClientSettings.PROXY_HOST_SETTING.getKey(), "proxy-host-1") + .put(KmsClientSettings.PROXY_PORT_SETTING.getKey(), 881) + .put(KmsClientSettings.REGION_SETTING.getKey(), "kms_region") + .put(KmsClientSettings.ENDPOINT_SETTING.getKey(), "kms_endpoint_1") + .setSecureSettings(mockSecure1) + .build(); + final MockSecureSettings mockSecure2 = new MockSecureSettings(); + mockSecure2.setString(KmsClientSettings.ACCESS_KEY_SETTING.getKey(), "kms_access_2"); + mockSecure2.setString(KmsClientSettings.SECRET_KEY_SETTING.getKey(), "kms_secret_2"); + final boolean mockSecure2HasSessionToken = randomBoolean(); + if (mockSecure2HasSessionToken) { + mockSecure2.setString(KmsClientSettings.SESSION_TOKEN_SETTING.getKey(), "kms_session_token_2"); + } + mockSecure2.setString(KmsClientSettings.PROXY_USERNAME_SETTING.getKey(), "proxy_username_2"); + mockSecure2.setString(KmsClientSettings.PROXY_PASSWORD_SETTING.getKey(), "proxy_password_2"); + final Settings settings2 = Settings.builder() + .put(KmsClientSettings.PROXY_HOST_SETTING.getKey(), "proxy-host-2") + .put(KmsClientSettings.PROXY_PORT_SETTING.getKey(), 882) + .put(KmsClientSettings.REGION_SETTING.getKey(), "kms_region") + .put(KmsClientSettings.ENDPOINT_SETTING.getKey(), "kms_endpoint_2") + .setSecureSettings(mockSecure2) + .build(); + try (CryptoKmsPluginMockTest plugin = new CryptoKmsPluginMockTest(settings1)) { + try (AmazonKmsClientReference clientReference = plugin.kmsService.client(cryptoMetadata)) { + { + final MockKmsClientTest mockKmsClientTest = (MockKmsClientTest) clientReference.get(); + assertEquals(mockKmsClientTest.endpoint, "kms_endpoint_1"); + + final AwsCredentials credentials = mockKmsClientTest.credentials.resolveCredentials(); + assertEquals(credentials.accessKeyId(), "kms_access_1"); + assertEquals(credentials.secretAccessKey(), "kms_secret_1"); + if (mockSecure1HasSessionToken) { + assertTrue(credentials instanceof AwsSessionCredentials); + 
assertEquals(((AwsSessionCredentials) credentials).sessionToken(), "kms_session_token_1"); + } else { + assertTrue(credentials instanceof AwsBasicCredentials); + } + + assertEquals( + mockKmsClientTest.proxyConfiguration.toString(), + "ProxyConfiguration(endpoint=https://proxy-host-1:881, username=proxy_username_1, preemptiveBasicAuthenticationEnabled=false)" + ); + assertEquals(mockKmsClientTest.proxyConfiguration.host(), "proxy-host-1"); + assertEquals(mockKmsClientTest.proxyConfiguration.port(), 881); + assertEquals(mockKmsClientTest.proxyConfiguration.username(), "proxy_username_1"); + assertEquals(mockKmsClientTest.proxyConfiguration.password(), "proxy_password_1"); + } + // reload secure settings2 + plugin.reload(settings2); + // client is not released, it is still using the old settings + { + final MockKmsClientTest mockKmsClientTest = (MockKmsClientTest) clientReference.get(); + assertEquals(mockKmsClientTest.endpoint, "kms_endpoint_1"); + + final AwsCredentials credentials = ((MockKmsClientTest) clientReference.get()).credentials.resolveCredentials(); + if (mockSecure1HasSessionToken) { + assertTrue(credentials instanceof AwsSessionCredentials); + assertEquals(((AwsSessionCredentials) credentials).sessionToken(), "kms_session_token_1"); + } else { + assertTrue(credentials instanceof AwsBasicCredentials); + } + + assertEquals( + mockKmsClientTest.proxyConfiguration.toString(), + "ProxyConfiguration(endpoint=https://proxy-host-1:881, username=proxy_username_1, preemptiveBasicAuthenticationEnabled=false)" + ); + assertEquals(mockKmsClientTest.proxyConfiguration.host(), "proxy-host-1"); + assertEquals(mockKmsClientTest.proxyConfiguration.port(), 881); + assertEquals(mockKmsClientTest.proxyConfiguration.username(), "proxy_username_1"); + assertEquals(mockKmsClientTest.proxyConfiguration.password(), "proxy_password_1"); + } + } + try (AmazonKmsClientReference clientReference = plugin.kmsService.client(cryptoMetadata)) { + final MockKmsClientTest mockKmsClientTest = (MockKmsClientTest) clientReference.get(); + assertEquals(mockKmsClientTest.endpoint, "kms_endpoint_2"); + + final AwsCredentials credentials = ((MockKmsClientTest) clientReference.get()).credentials.resolveCredentials(); + assertEquals(credentials.accessKeyId(), "kms_access_2"); + assertEquals(credentials.secretAccessKey(), "kms_secret_2"); + if (mockSecure2HasSessionToken) { + assertTrue(credentials instanceof AwsSessionCredentials); + assertEquals(((AwsSessionCredentials) credentials).sessionToken(), "kms_session_token_2"); + } else { + assertTrue(credentials instanceof AwsBasicCredentials); + } + + assertEquals( + mockKmsClientTest.proxyConfiguration.toString(), + "ProxyConfiguration(endpoint=https://proxy-host-2:882, username=proxy_username_2, preemptiveBasicAuthenticationEnabled=false)" + ); + assertEquals(mockKmsClientTest.proxyConfiguration.host(), "proxy-host-2"); + assertEquals(mockKmsClientTest.proxyConfiguration.port(), 882); + assertEquals(mockKmsClientTest.proxyConfiguration.username(), "proxy_username_2"); + assertEquals(mockKmsClientTest.proxyConfiguration.password(), "proxy_password_2"); + } + } + } + + static class CryptoKmsPluginMockTest extends CryptoKmsPlugin { + + CryptoKmsPluginMockTest(Settings settings) { + super(settings, new KmsService() { + @Override + protected KmsClient buildClient( + AwsCredentialsProvider credentials, + ProxyConfiguration proxyConfiguration, + ClientOverrideConfiguration overrideConfiguration, + String endpoint, + String region, + long readTimeoutMillis + ) { + return new 
MockKmsClientTest(
+                        credentials,
+                        proxyConfiguration,
+                        overrideConfiguration,
+                        endpoint,
+                        region,
+                        readTimeoutMillis
+                    );
+                }
+            });
+        }
+    }
+
+    static class MockKmsClientTest implements KmsClient {
+
+        String endpoint;
+        final String region;
+        final AwsCredentialsProvider credentials;
+        final ClientOverrideConfiguration clientOverrideConfiguration;
+        final ProxyConfiguration proxyConfiguration;
+        final long readTimeoutMillis;
+
+        MockKmsClientTest(
+            AwsCredentialsProvider credentials,
+            ProxyConfiguration proxyConfiguration,
+            ClientOverrideConfiguration clientOverrideConfiguration,
+            String endpoint,
+            String region,
+            long readTimeoutMillis
+        ) {
+            this.credentials = credentials;
+            this.proxyConfiguration = proxyConfiguration;
+            this.clientOverrideConfiguration = clientOverrideConfiguration;
+            this.endpoint = endpoint;
+            this.region = region;
+            this.readTimeoutMillis = readTimeoutMillis;
+        }
+
+        @Override
+        public String serviceName() {
+            return "kms";
+        }
+
+        @Override
+        public void close() {
+            // ignore
+        }
+    }
+}
diff --git a/plugins/crypto-kms/src/yamlRestTest/java/org/opensearch/kms/CloudAwsClientYamlTestSuiteIT.java b/plugins/crypto-kms/src/yamlRestTest/java/org/opensearch/kms/CloudAwsClientYamlTestSuiteIT.java
new file mode 100644
index 0000000000000..87e6691e40855
--- /dev/null
+++ b/plugins/crypto-kms/src/yamlRestTest/java/org/opensearch/kms/CloudAwsClientYamlTestSuiteIT.java
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.kms;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.opensearch.test.rest.yaml.ClientYamlTestCandidate;
+import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase;
+
+public class CloudAwsClientYamlTestSuiteIT extends OpenSearchClientYamlSuiteTestCase {
+
+    public CloudAwsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws Exception {
+        return OpenSearchClientYamlSuiteTestCase.createParameters();
+    }
+}
diff --git a/plugins/crypto-kms/src/yamlRestTest/resources/rest-api-spec/test/kms/10_basic.yml b/plugins/crypto-kms/src/yamlRestTest/resources/rest-api-spec/test/kms/10_basic.yml
new file mode 100644
index 0000000000000..3d6c3056a6975
--- /dev/null
+++ b/plugins/crypto-kms/src/yamlRestTest/resources/rest-api-spec/test/kms/10_basic.yml
@@ -0,0 +1,16 @@
+# Integration tests for KMS component
+#
+"KMS loaded":
+  - skip:
+      reason: "contains is a newly added assertion"
+      features: contains
+  - do:
+      cluster.state: {}
+
+  # Get cluster-manager node id
+  - set: { cluster_manager_node: cluster_manager }
+
+  - do:
+      nodes.info: {}
+
+  - contains: { nodes.$cluster_manager.plugins: { name: crypto-kms } }
diff --git a/plugins/discovery-azure-classic/licenses/commons-logging-LICENSE.txt b/plugins/discovery-azure-classic/licenses/commons-logging-LICENSE.txt
index d645695673349..57bc88a15a0ee 100644
--- a/plugins/discovery-azure-classic/licenses/commons-logging-LICENSE.txt
+++ b/plugins/discovery-azure-classic/licenses/commons-logging-LICENSE.txt
@@ -1,4 +1,3 @@
-
                                  Apache License
                            Version 2.0, January 2004
                         http://www.apache.org/licenses/
@@ -200,3 +199,4 @@
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License. + diff --git a/plugins/discovery-azure-classic/licenses/commons-logging-NOTICE.txt b/plugins/discovery-azure-classic/licenses/commons-logging-NOTICE.txt index d3d6e140ce4f3..72eb32a902458 100644 --- a/plugins/discovery-azure-classic/licenses/commons-logging-NOTICE.txt +++ b/plugins/discovery-azure-classic/licenses/commons-logging-NOTICE.txt @@ -1,5 +1,5 @@ -Apache Commons Logging -Copyright 2003-2014 The Apache Software Foundation +Apache Commons CLI +Copyright 2001-2009 The Apache Software Foundation -This product includes software developed at +This product includes software developed by The Apache Software Foundation (http://www.apache.org/). diff --git a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/discovery-ec2/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java index 23070c389a7b1..02e1ff40f7ed6 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java @@ -48,6 +48,7 @@ import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; @@ -91,7 +92,8 @@ protected MockTransportService createTransportService() { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, writableRegistry(), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override public TransportAddress[] addressesFromString(String address) { @@ -99,7 +101,14 @@ public TransportAddress[] addressesFromString(String address) { return new TransportAddress[] { 
poorMansDNS.getOrDefault(address, buildNewFakeTransportAddress()) }; } }; - return new MockTransportService(Settings.EMPTY, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); + return new MockTransportService( + Settings.EMPTY, + transport, + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + null, + NoopTracer.INSTANCE + ); } protected List buildDynamicHosts(Settings nodeSettings, int nodes) { diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java index c792fe6d96728..ce097667f9c4b 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java @@ -47,6 +47,7 @@ import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.discovery.SeedHostsProvider; import org.opensearch.discovery.SeedHostsResolver; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; @@ -76,11 +77,13 @@ protected MockTransportService createTransportService() { networkService, PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - null + null, + NoopTracer.INSTANCE ); } diff --git a/plugins/discovery-gce/licenses/commons-logging-LICENSE.txt b/plugins/discovery-gce/licenses/commons-logging-LICENSE.txt index d645695673349..57bc88a15a0ee 100644 --- a/plugins/discovery-gce/licenses/commons-logging-LICENSE.txt +++ b/plugins/discovery-gce/licenses/commons-logging-LICENSE.txt @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -200,3 +199,4 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + diff --git a/plugins/discovery-gce/licenses/commons-logging-NOTICE.txt b/plugins/discovery-gce/licenses/commons-logging-NOTICE.txt index d3d6e140ce4f3..72eb32a902458 100644 --- a/plugins/discovery-gce/licenses/commons-logging-NOTICE.txt +++ b/plugins/discovery-gce/licenses/commons-logging-NOTICE.txt @@ -1,5 +1,5 @@ -Apache Commons Logging -Copyright 2003-2014 The Apache Software Foundation +Apache Commons CLI +Copyright 2001-2009 The Apache Software Foundation -This product includes software developed at +This product includes software developed by The Apache Software Foundation (http://www.apache.org/). 
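The discovery test hunks above all make the same change: transport construction now takes an explicit Tracer, and tests satisfy it with the no-op singleton instead of passing null. A minimal sketch of the new wiring, assuming only the factory call visible in these hunks (the helper class and its names here are illustrative, not part of the patch):

import org.opensearch.Version;
import org.opensearch.common.settings.Settings;
import org.opensearch.telemetry.tracing.noop.NoopTracer;
import org.opensearch.test.transport.MockTransportService;
import org.opensearch.threadpool.TestThreadPool;
import org.opensearch.threadpool.ThreadPool;

public class NoopTracerWiringSketch {
    // Same factory call the GceDiscoveryTests hunk below switches to:
    // the trailing argument is now a Tracer rather than null.
    public static MockTransportService newTestService(ThreadPool threadPool) {
        return MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, NoopTracer.INSTANCE);
    }

    public static void main(String[] args) throws Exception {
        ThreadPool threadPool = new TestThreadPool("tracer-wiring-sketch");
        try (MockTransportService service = newTestService(threadPool)) {
            service.start(); // behaves as before; spans are dropped by NoopTracer
        } finally {
            threadPool.shutdown();
        }
    }
}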
diff --git a/plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/discovery-gce/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/discovery-gce/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java index c63085deb466f..2208c78bef67a 100644 --- a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java +++ b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java @@ -38,6 +38,7 @@ import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -65,7 +66,6 @@ * compute/v1/projects/[project-id]/zones/[zone] * * By default, project-id is the test method name, lowercase and missing the "test" prefix. - * * For example, if you create a test `myNewAwesomeTest` with following settings: * * Settings nodeSettings = Settings.builder() @@ -74,7 +74,6 @@ * .build(); * * You need to create a file under `src/test/resources/org/opensearch/discovery/gce/` named: - * * compute/v1/projects/mynewawesometest/zones/europe-west1-b/instances.json * */ @@ -109,7 +108,7 @@ public void setProjectName() { @Before public void createTransportService() { - transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null); + transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, NoopTracer.INSTANCE); } @After diff --git a/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java b/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java index 34d3aec2c3d9f..3e729a26223fb 100644 --- a/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java +++ b/plugins/examples/custom-suggester/src/main/java/org/opensearch/example/customsuggester/CustomSuggestion.java @@ -48,6 +48,10 @@ public class CustomSuggestion extends Suggest.Suggestion { + /** + * An integer representing the type of the suggestion formerly used for internal serialization over the network. + * This class is now serialized as a NamedWriteable and this value only remains for backwards compatibility + */ public static final int TYPE = 999; public static final ParseField DUMMY = new ParseField("dummy"); @@ -82,7 +86,7 @@ public int getWriteableType() { /** * A meaningless value used to test that plugin suggesters can add fields to their Suggestion types - * + *
<p>
      * This can't be serialized to xcontent because Suggestions appear in xcontent as an array of entries, so there is no place * to add a custom field. But we can still use a custom field internally and use it to define a Suggestion's behavior */ diff --git a/plugins/examples/painless-whitelist/src/main/java/org/opensearch/example/painlesswhitelist/ExamplePainlessAnnotation.java b/plugins/examples/painless-whitelist/src/main/java/org/opensearch/example/painlesswhitelist/ExamplePainlessAnnotation.java index c053ecee333f3..0125566aef508 100644 --- a/plugins/examples/painless-whitelist/src/main/java/org/opensearch/example/painlesswhitelist/ExamplePainlessAnnotation.java +++ b/plugins/examples/painless-whitelist/src/main/java/org/opensearch/example/painlesswhitelist/ExamplePainlessAnnotation.java @@ -32,6 +32,12 @@ package org.opensearch.example.painlesswhitelist; +/** + * An example of an annotation to be allowlisted for use by painless scripts + *
<p>
      + * The annotation below is allowlisted for use in search scripts. + * See example_allowlist.txt. + */ public class ExamplePainlessAnnotation { public static final String NAME = "example_annotation"; diff --git a/plugins/examples/painless-whitelist/src/main/java/org/opensearch/example/painlesswhitelist/ExampleStaticMethodClass.java b/plugins/examples/painless-whitelist/src/main/java/org/opensearch/example/painlesswhitelist/ExampleStaticMethodClass.java index 5589f4894c14d..759502a34dd3c 100644 --- a/plugins/examples/painless-whitelist/src/main/java/org/opensearch/example/painlesswhitelist/ExampleStaticMethodClass.java +++ b/plugins/examples/painless-whitelist/src/main/java/org/opensearch/example/painlesswhitelist/ExampleStaticMethodClass.java @@ -32,6 +32,12 @@ package org.opensearch.example.painlesswhitelist; +/** + * An example of a class with static methods to be allowlisted for use by painless scripts + *
<p>
      + * The method below is allowlisted for use in search scripts. + * See example_allowlist.txt. + */ public class ExampleStaticMethodClass { public static int exampleAddInts(int x, int y) { return x + y; diff --git a/plugins/examples/painless-whitelist/src/main/java/org/opensearch/example/painlesswhitelist/ExampleWhitelistedClass.java b/plugins/examples/painless-whitelist/src/main/java/org/opensearch/example/painlesswhitelist/ExampleWhitelistedClass.java index 5832a2ee59a85..0d6999d6909d3 100644 --- a/plugins/examples/painless-whitelist/src/main/java/org/opensearch/example/painlesswhitelist/ExampleWhitelistedClass.java +++ b/plugins/examples/painless-whitelist/src/main/java/org/opensearch/example/painlesswhitelist/ExampleWhitelistedClass.java @@ -34,7 +34,7 @@ /** * An example of a class to be allowlisted for use by painless scripts - * + *
<p>
      * Each of the members and methods below are allowlisted for use in search scripts. * See example_whitelist.txt. */ diff --git a/plugins/examples/painless-whitelist/src/main/java/org/opensearch/example/painlesswhitelist/ExampleWhitelistedInstance.java b/plugins/examples/painless-whitelist/src/main/java/org/opensearch/example/painlesswhitelist/ExampleWhitelistedInstance.java index 1777a21476fc0..db2105ffa21ec 100644 --- a/plugins/examples/painless-whitelist/src/main/java/org/opensearch/example/painlesswhitelist/ExampleWhitelistedInstance.java +++ b/plugins/examples/painless-whitelist/src/main/java/org/opensearch/example/painlesswhitelist/ExampleWhitelistedInstance.java @@ -32,6 +32,11 @@ package org.opensearch.example.painlesswhitelist; +/** + * An example of an instance to be allowlisted for use by painless scripts. + *
<p>
      + * Each of the members and methods below are allowlisted for use in search scripts but only from this instance. + */ public class ExampleWhitelistedInstance { private final int value; diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.75.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.75.jar.sha1 deleted file mode 100644 index 9911bb75f9209..0000000000000 --- a/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.75.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -df22e1b6a9f6b218913f5b68dd16641344397fe0 \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.76.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.76.jar.sha1 new file mode 100644 index 0000000000000..2e96c404bef98 --- /dev/null +++ b/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.76.jar.sha1 @@ -0,0 +1 @@ +0cb53f10290a634808555bc4b34328fdab1001f2 \ No newline at end of file diff --git a/plugins/identity-shiro/licenses/commons-logging-LICENSE.txt b/plugins/identity-shiro/licenses/commons-logging-LICENSE.txt index d645695673349..57bc88a15a0ee 100644 --- a/plugins/identity-shiro/licenses/commons-logging-LICENSE.txt +++ b/plugins/identity-shiro/licenses/commons-logging-LICENSE.txt @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -200,3 +199,4 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + diff --git a/plugins/identity-shiro/licenses/commons-logging-NOTICE.txt b/plugins/identity-shiro/licenses/commons-logging-NOTICE.txt index 1a45218353e87..72eb32a902458 100644 --- a/plugins/identity-shiro/licenses/commons-logging-NOTICE.txt +++ b/plugins/identity-shiro/licenses/commons-logging-NOTICE.txt @@ -1,5 +1,5 @@ -Apache Commons Logging -Copyright 2003-2016 The Apache Software Foundation +Apache Commons CLI +Copyright 2001-2009 The Apache Software Foundation -This product includes software developed at +This product includes software developed by The Apache Software Foundation (http://www.apache.org/). 
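For readers unfamiliar with painless allowlisting: the example classes above are ordinary Java, and what the plugin adds is an allowlist resource (example_allowlist.txt / example_whitelist.txt, referenced in the javadocs) that makes them callable from scripts. A plain-Java illustration of the call the allowlist exposes (the wrapper class here is hypothetical; exampleAddInts is taken verbatim from the hunk above):

import org.opensearch.example.painlesswhitelist.ExampleStaticMethodClass;

public class AllowlistCallSketch {
    public static void main(String[] args) {
        // In a painless search script this appears simply as exampleAddInts(1, 2)
        // once the allowlist registers the static method.
        int sum = ExampleStaticMethodClass.exampleAddInts(1, 2); // body shown above: x + y
        System.out.println(sum); // prints 3
    }
}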
diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index d33470b9a5f0d..8945c09fca28b 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -57,7 +57,7 @@ dependencies { runtimeOnly "com.google.guava:guava:${versions.guava}" // Other dependencies api 'org.tukaani:xz:1.9' - api 'commons-io:commons-io:2.13.0' + api 'commons-io:commons-io:2.14.0' api "org.slf4j:slf4j-api:${versions.slf4j}" // character set detection @@ -84,16 +84,16 @@ dependencies { // MS Office api "org.apache.poi:poi-scratchpad:${versions.poi}" // Apple iWork - api 'org.apache.commons:commons-compress:1.22' + api "org.apache.commons:commons-compress:${versions.commonscompress}" // Outlook documents api "org.apache.james:apache-mime4j-core:${versions.mime4j}" api "org.apache.james:apache-mime4j-dom:${versions.mime4j}" // EPUB books - api 'org.apache.commons:commons-lang3:3.13.0' + api "org.apache.commons:commons-lang3:${versions.commonslang}" // Microsoft Word files with visio diagrams api 'org.apache.commons:commons-math3:3.6.1' // POIs dependency - api 'com.zaxxer:SparseBitSet:1.2' + api 'com.zaxxer:SparseBitSet:1.3' } restResources { diff --git a/plugins/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 b/plugins/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 deleted file mode 100644 index 5f1d015b87ac7..0000000000000 --- a/plugins/ingest-attachment/licenses/SparseBitSet-1.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8467c813d442837fcaeddbc42cf5c5359fab4933 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/SparseBitSet-1.3.jar.sha1 b/plugins/ingest-attachment/licenses/SparseBitSet-1.3.jar.sha1 new file mode 100644 index 0000000000000..2803db7c91e30 --- /dev/null +++ b/plugins/ingest-attachment/licenses/SparseBitSet-1.3.jar.sha1 @@ -0,0 +1 @@ +533eac055afe3d5f614ea95e333afd6c2bde8f26 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.75.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.75.jar.sha1 deleted file mode 100644 index e6840a9b02b38..0000000000000 --- a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.75.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b316bcd094e3917b1ece93a6edbab93f8315fb3b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.76.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.76.jar.sha1 new file mode 100644 index 0000000000000..46010d64015ad --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.76.jar.sha1 @@ -0,0 +1 @@ +23d8bcad6b57912e4633ca9955926ffcdf3c5c71 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.75.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.75.jar.sha1 deleted file mode 100644 index 9181b1c3ab1b6..0000000000000 --- a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.75.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f16e5252ad7a46d5eaf255231b0a5da307599082 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.76.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.76.jar.sha1 new file mode 100644 index 0000000000000..a843d972ac681 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.76.jar.sha1 @@ -0,0 +1 @@ +3ee440dfa1c557c1cc0c46b5dadf5ef3896ccebb \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.75.jar.sha1 
b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.75.jar.sha1 deleted file mode 100644 index 9911bb75f9209..0000000000000 --- a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.75.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -df22e1b6a9f6b218913f5b68dd16641344397fe0 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.76.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.76.jar.sha1 new file mode 100644 index 0000000000000..2e96c404bef98 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.76.jar.sha1 @@ -0,0 +1 @@ +0cb53f10290a634808555bc4b34328fdab1001f2 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-LICENSE.txt b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-LICENSE.txt index e1fc4a1506db5..9f27bafe96885 100644 --- a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-LICENSE.txt +++ b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-LICENSE.txt @@ -20,4 +20,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/plugins/ingest-attachment/licenses/commons-compress-1.22.jar.sha1 b/plugins/ingest-attachment/licenses/commons-compress-1.22.jar.sha1 deleted file mode 100644 index 9ab7216c8050a..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-compress-1.22.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -691a8b4e6cf4248c3bc72c8b719337d5cb7359fa \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-compress-1.24.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-compress-1.24.0.jar.sha1 new file mode 100644 index 0000000000000..23999d1bfbde4 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-compress-1.24.0.jar.sha1 @@ -0,0 +1 @@ +b4b1b5a3d9573b2970fddab236102c0a4d27d35e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 deleted file mode 100644 index c165136eb5822..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-io-2.13.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8bb2bc9b4df17e2411533a0708a69f983bf5e83b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 new file mode 100644 index 0000000000000..33c5cfe53e01d --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-io-2.14.0.jar.sha1 @@ -0,0 +1 @@ +a4c6e1f6c196339473cd2e1b037f0eb97c62755b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-logging-LICENSE.txt b/plugins/ingest-attachment/licenses/commons-logging-LICENSE.txt index d645695673349..57bc88a15a0ee 100644 --- a/plugins/ingest-attachment/licenses/commons-logging-LICENSE.txt +++ b/plugins/ingest-attachment/licenses/commons-logging-LICENSE.txt @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -200,3 +199,4 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ diff --git a/plugins/ingest-attachment/licenses/commons-logging-NOTICE.txt b/plugins/ingest-attachment/licenses/commons-logging-NOTICE.txt index d3d6e140ce4f3..72eb32a902458 100644 --- a/plugins/ingest-attachment/licenses/commons-logging-NOTICE.txt +++ b/plugins/ingest-attachment/licenses/commons-logging-NOTICE.txt @@ -1,5 +1,5 @@ -Apache Commons Logging -Copyright 2003-2014 The Apache Software Foundation +Apache Commons CLI +Copyright 2001-2009 The Apache Software Foundation -This product includes software developed at +This product includes software developed by The Apache Software Foundation (http://www.apache.org/). diff --git a/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt b/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt index 52055e61de46f..8fda22f4d72f6 100644 --- a/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt +++ b/plugins/ingest-attachment/licenses/slf4j-api-LICENSE.txt @@ -1,21 +1,21 @@ -Copyright (c) 2004-2014 QOS.ch -All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +Copyright (c) 2004-2014 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
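The mapper-annotated-text hunk below repairs the javadoc that documents the plugin's markdown-like annotation markup. As a reader aid, a hypothetical sketch of pulling such markup apart (this regex is illustrative only, not the plugin's actual parser, which works through a wrapping Analyzer as that javadoc describes):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class AnnotationMarkupSketch {
    // Matches the [visible text](key=value&key=value) convention from the javadoc below.
    private static final Pattern ANNOTATION = Pattern.compile("\\[([^\\]]+)\\]\\(([^)]+)\\)");

    public static void main(String[] args) {
        Matcher m = ANNOTATION.matcher("New mayor is [John Smith](type=person&value=John%20Smith)");
        while (m.find()) {
            // prints: text=John Smith annotation=type=person&value=John%20Smith
            System.out.println("text=" + m.group(1) + " annotation=" + m.group(2));
        }
    }
}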
diff --git a/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index 935887261dcc9..952cff96860f2 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/opensearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -73,9 +73,9 @@ import java.util.regex.Pattern; /** A {@link FieldMapper} for full-text fields with annotation markup e.g. - * + *
<p>
      * "New mayor is [John Smith](type=person&value=John%20Smith) " - * + *
<p>
      * A special Analyzer wraps the default choice of analyzer in order * to strip the text field of annotation markup and inject the related * entity annotation tokens as supplementary tokens at the relevant points diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index b4fa4c87b1393..2695d3f99576d 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -70,7 +70,7 @@ dependencies { api 'org.codehaus.woodstox:stax2-api:4.2.1' implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" runtimeOnly "com.google.guava:guava:${versions.guava}" - api 'org.apache.commons:commons-lang3:3.12.0' + api "org.apache.commons:commons-lang3:${versions.commonslang}" testImplementation project(':test:fixtures:azure-fixture') } diff --git a/plugins/repository-azure/licenses/commons-lang3-3.12.0.jar.sha1 b/plugins/repository-azure/licenses/commons-lang3-3.12.0.jar.sha1 deleted file mode 100644 index 9273d8c01aaba..0000000000000 --- a/plugins/repository-azure/licenses/commons-lang3-3.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6842c86792ff03b9f1d1fe2aab8dc23aa6c6f0e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 b/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 new file mode 100644 index 0000000000000..d0c2f2486ee1f --- /dev/null +++ b/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 @@ -0,0 +1 @@ +b7263237aa89c1f99b327197c41d0669707a462e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/commons-lang3-NOTICE.txt b/plugins/repository-azure/licenses/commons-lang3-NOTICE.txt index 078282451b679..13a3140897472 100644 --- a/plugins/repository-azure/licenses/commons-lang3-NOTICE.txt +++ b/plugins/repository-azure/licenses/commons-lang3-NOTICE.txt @@ -1,8 +1,5 @@ Apache Commons Lang -Copyright 2001-2014 The Apache Software Foundation +Copyright 2001-2019 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). 
- -This product includes software from the Spring Framework, -under the Apache License 2.0 (see: StringUtils.containsWhitespace()) diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..dfa4a0fbea94c --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +663b1b7bf3ff0f12fde4df20c72d9e94584ebffa \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.96.Final.jar.sha1 deleted file mode 100644 index 42d5e60ce9d45..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -afd90dc0e164be74b4a3e1a899890557fce98567 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..bf5605151406e --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +cbf1a430ea44dbdedbcde16b185cbb95f28d72c7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 deleted file mode 100644 index 2fc787ee65197..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cc8baf4ff67c1bcc0cde60bc5c2bb9447d92d9e6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..8e9bc8c96aec7 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +a9fbf4d64b08abed542eefd5f7aed4807edca56f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.96.Final.jar.sha1 deleted file mode 100644 index 8e959bdac5079..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f53c52dbddaa4a02a51430405792d3f30a89b147 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..35d9d82202274 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +af3cf676eed30184215426ecf0f0dde15555ea9c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.96.Final.jar.sha1 deleted file mode 100644 index d410208dada90..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dcabd63f4aaec2b4cad7588bfdd4cd2c82287e38 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 new file mode 100644 index 
0000000000000..0948daa05fff6 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +62dbdf5f25eda75ea8456be1ed72b3fcb0d18774 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.96.Final.jar.sha1 deleted file mode 100644 index 5041cf5473505..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0095023cc667af76578c9be326a6d54e3e1de52c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..30d7758302e37 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +648ff5571022dbfa6789122e3872477bbf67fa7b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 deleted file mode 100644 index e911c47d5ab1a..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -daf8578cade63a01525ee9d70371fa78e6e91094 \ No newline at end of file diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java index 82364d1b7b3c1..986720ec431fe 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -154,7 +154,7 @@ private static class AzureBlobStoreHttpHandler extends AzureHttpHandler implemen /** * HTTP handler that injects random Azure service errors - * + *
<p>
      * Note: it is not a good idea to allow this handler to simulate too many errors as it would * slow down the test suite. */ diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java index 65852c4fc5bd0..47a5536a6cd8a 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureRepository.java @@ -47,6 +47,8 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; +import java.util.ArrayList; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.function.Function; @@ -114,14 +116,7 @@ public AzureRepository( final ClusterService clusterService, final RecoverySettings recoverySettings ) { - super( - metadata, - COMPRESS_SETTING.get(metadata.settings()), - namedXContentRegistry, - clusterService, - recoverySettings, - buildLocation(metadata) - ); + super(metadata, namedXContentRegistry, clusterService, recoverySettings, buildLocation(metadata)); this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); this.storageService = storageService; @@ -192,4 +187,13 @@ protected ByteSizeValue chunkSize() { public boolean isReadOnly() { return readonly; } + + @Override + public List> getRestrictedSystemRepositorySettings() { + List> restrictedSettings = new ArrayList<>(); + restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings()); + restrictedSettings.add(Repository.BASE_PATH_SETTING); + restrictedSettings.add(Repository.LOCATION_MODE_SETTING); + return restrictedSettings; + } } diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java index 9dcc312f8f5a7..b60701ba5e533 100644 --- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java @@ -212,8 +212,8 @@ protected PasswordAuthentication getPasswordAuthentication() { /** * The location mode is not there in v12 APIs anymore but it is possible to mimic its semantics using - * retry options and combination of primary / secondary endpoints. Refer to migration guide for mode details: - * https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-blob/migrationGuides/V8_V12.md#miscellaneous + * retry options and combination of primary / secondary endpoints. Refer to + * migration guide for mode details: */ private BlobServiceClientBuilder applyLocationMode(final BlobServiceClientBuilder builder, final AzureStorageSettings settings) { final StorageConnectionString storageConnectionString = StorageConnectionString.create(settings.getConnectString(), logger); @@ -335,8 +335,8 @@ private void closeInternally(ClientState state) { } /** - * Implements HTTP pipeline policy to collect statistics on API calls. See please: - * https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/storage/azure-storage-blob/migrationGuides/V8_V12.md#miscellaneous + * Implements HTTP pipeline policy to collect statistics on API calls. 
See : + * migration guide */ private static class HttpStatsPolicy implements HttpPipelinePolicy { private final BiConsumer statsCollector; diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java index c8be30dbaf865..3356e5174592a 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureRepositorySettingsTests.java @@ -34,16 +34,20 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.BlobStoreTestUtil; import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; +import java.util.List; + import reactor.core.scheduler.Schedulers; import static org.hamcrest.Matchers.is; @@ -179,4 +183,21 @@ public void testChunkSize() { ); } + public void testSystemRepositoryDefault() { + assertThat(azureRepository(Settings.EMPTY).isSystemRepository(), is(false)); + } + + public void testSystemRepositoryOn() { + assertThat(azureRepository(Settings.builder().put("system_repository", true).build()).isSystemRepository(), is(true)); + } + + public void testRestrictedSettingsDefault() { + List> restrictedSettings = azureRepository(Settings.EMPTY).getRestrictedSystemRepositorySettings(); + assertThat(restrictedSettings.size(), is(5)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.READONLY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY)); + assertTrue(restrictedSettings.contains(AzureRepository.Repository.BASE_PATH_SETTING)); + assertTrue(restrictedSettings.contains(AzureRepository.Repository.LOCATION_MODE_SETTING)); + } } diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java index 264888bb7da3a..bb0eafc7d1d4a 100644 --- a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureStorageServiceTests.java @@ -451,7 +451,8 @@ private static RequestRetryPolicy requestRetryOptions(BlobServiceClient client) } /** - * Extract the blob name from a URI like https://myservice.azure.net/container/path/to/myfile + * Extract the blob name from a URI like : + * {@code https://myservice.azure.net/container/path/to/myfile } * It should remove the container part (first part of the path) and gives path/to/myfile * @param uri URI to parse * @return The blob name relative to the container diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 1f4104a929116..2cb9ee63865d5 100644 --- 
a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -60,14 +60,14 @@ dependencies { api 'com.google.api-client:google-api-client:2.2.0' - api 'com.google.api.grpc:proto-google-common-protos:2.10.0' + api 'com.google.api.grpc:proto-google-common-protos:2.25.1' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" api "com.google.auth:google-auth-library-oauth2-http:${versions.google_auth}" api 'com.google.cloud:google-cloud-core:2.5.10' - api 'com.google.cloud:google-cloud-core-http:2.21.1' + api 'com.google.cloud:google-cloud-core-http:2.23.0' api 'com.google.cloud:google-cloud-storage:1.113.1' api 'com.google.code.gson:gson:2.10.1' @@ -86,7 +86,7 @@ dependencies { api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" api 'org.threeten:threetenbp:1.4.4' - api 'io.grpc:grpc-api:1.57.1' + api 'io.grpc:grpc-api:1.57.2' api 'io.opencensus:opencensus-api:0.31.1' api 'io.opencensus:opencensus-contrib-http-util:0.31.1' diff --git a/plugins/repository-gcs/licenses/commons-logging-LICENSE.txt b/plugins/repository-gcs/licenses/commons-logging-LICENSE.txt index 57bc88a15a0ee..d645695673349 100644 --- a/plugins/repository-gcs/licenses/commons-logging-LICENSE.txt +++ b/plugins/repository-gcs/licenses/commons-logging-LICENSE.txt @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -199,4 +200,3 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - diff --git a/plugins/repository-gcs/licenses/commons-logging-NOTICE.txt b/plugins/repository-gcs/licenses/commons-logging-NOTICE.txt index 72eb32a902458..d3d6e140ce4f3 100644 --- a/plugins/repository-gcs/licenses/commons-logging-NOTICE.txt +++ b/plugins/repository-gcs/licenses/commons-logging-NOTICE.txt @@ -1,5 +1,5 @@ -Apache Commons CLI -Copyright 2001-2009 The Apache Software Foundation +Apache Commons Logging +Copyright 2003-2014 The Apache Software Foundation -This product includes software developed by +This product includes software developed at The Apache Software Foundation (http://www.apache.org/). 
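Two of the repository-gcs hunks below are behavioral rather than cosmetic: StorageOptions.build() is wrapped in a privileged block, and SocketAccess gains a doPrivilegedException helper to support it. A simplified sketch of that pattern (the real helper also calls SpecialPermission.check() and rethrows through log4j's Throwables.rethrow; class and method names here are illustrative):

import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public final class PrivilegedBlockSketch {
    private PrivilegedBlockSketch() {}

    // Run an action under the plugin's own security policy so that SDK calls which
    // may open sockets succeed regardless of what permissions the caller frames hold.
    public static <T> T doPrivilegedException(PrivilegedExceptionAction<T> operation) throws Exception {
        try {
            return AccessController.doPrivileged(operation);
        } catch (PrivilegedActionException e) {
            throw (Exception) e.getCause(); // unwrap the exception thrown by the action
        }
    }

    public static void main(String[] args) throws Exception {
        // Usage mirrors the GoogleCloudStorageService change below:
        //   return SocketAccess.doPrivilegedException(() -> storageOptionsBuilder.build());
        System.out.println(doPrivilegedException(() -> "ok"));
    }
}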
diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-2.21.1.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-2.21.1.jar.sha1 deleted file mode 100644 index cc5e7a53098ac..0000000000000 --- a/plugins/repository-gcs/licenses/google-cloud-core-http-2.21.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -88dd2b413dd06826c611e39e6e3259e069f02f66 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-2.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-2.23.0.jar.sha1 new file mode 100644 index 0000000000000..9db3cbcbec35b --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-http-2.23.0.jar.sha1 @@ -0,0 +1 @@ +9913d0806fcfbfbc4a775f29865126ed8465464b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-api-1.57.1.jar.sha1 b/plugins/repository-gcs/licenses/grpc-api-1.57.1.jar.sha1 deleted file mode 100644 index c52d208334070..0000000000000 --- a/plugins/repository-gcs/licenses/grpc-api-1.57.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a7f06d11b65839cf222159b4e947a22eddc59e6 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-api-1.57.2.jar.sha1 b/plugins/repository-gcs/licenses/grpc-api-1.57.2.jar.sha1 new file mode 100644 index 0000000000000..8b320fdd2f9cc --- /dev/null +++ b/plugins/repository-gcs/licenses/grpc-api-1.57.2.jar.sha1 @@ -0,0 +1 @@ +c71a006b81ddae7bc4b7cb1d2da78c1b173761f4 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/repository-gcs/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/repository-gcs/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.10.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.10.0.jar.sha1 deleted file mode 100644 index bf97707836c70..0000000000000 --- a/plugins/repository-gcs/licenses/proto-google-common-protos-2.10.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cf5ac081c05682b0eba6659dee55352fde5852e1 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.1.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.1.jar.sha1 new file mode 100644 index 0000000000000..cd065dabb8e8a --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-2.25.1.jar.sha1 @@ -0,0 +1 @@ +cb90049537b621e39610a110c58ce0b914ee3cc5 \ No newline at end of file diff --git a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index f8e1a1cc39ae0..d223f7989c688 100644 --- a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ 
b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -298,7 +298,7 @@ private static class GoogleCloudStorageBlobStoreHttpHandler extends GoogleCloudS /** * HTTP handler that injects random Google Cloud Storage service errors - * + *
<p>
      * Note: it is not a good idea to allow this handler to simulate too many errors as it would * slow down the test suite. */ diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java index c42cd1802f6e9..f6d078868b875 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -46,6 +46,8 @@ import org.opensearch.repositories.RepositoryException; import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.function.Function; @@ -92,14 +94,7 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository { final ClusterService clusterService, final RecoverySettings recoverySettings ) { - super( - metadata, - getSetting(COMPRESS_SETTING, metadata), - namedXContentRegistry, - clusterService, - recoverySettings, - buildLocation(metadata) - ); + super(metadata, namedXContentRegistry, clusterService, recoverySettings, buildLocation(metadata)); this.storageService = storageService; String basePath = BASE_PATH.get(metadata.settings()); @@ -138,6 +133,15 @@ protected ByteSizeValue chunkSize() { return chunkSize; } + @Override + public List> getRestrictedSystemRepositorySettings() { + List> restrictedSettings = new ArrayList<>(); + restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings()); + restrictedSettings.add(BUCKET); + restrictedSettings.add(BASE_PATH); + return restrictedSettings; + } + /** * Get a given setting from the repository settings, throwing a {@link RepositoryException} if the setting does not exist or is empty. 
*/ diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java index 445e1d65f3d3e..c9ebb3acaf3e5 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java @@ -228,7 +228,7 @@ StorageOptions createStorageOptions( } storageOptionsBuilder.setCredentials(serviceAccountCredentials); } - return storageOptionsBuilder.build(); + return SocketAccess.doPrivilegedException(() -> storageOptionsBuilder.build()); } /** diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java index 197e772df30d5..35127d6ea4060 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/SocketAccess.java @@ -32,6 +32,7 @@ package org.opensearch.repositories.gcs; +import org.apache.logging.log4j.core.util.Throwables; import org.opensearch.SpecialPermission; import org.opensearch.common.CheckedRunnable; @@ -71,4 +72,16 @@ public static void doPrivilegedVoidIOException(CheckedRunnable acti throw (IOException) e.getCause(); } } + + public static T doPrivilegedException(PrivilegedExceptionAction operation) { + SpecialPermission.check(); + try { + return AccessController.doPrivileged(operation); + } catch (PrivilegedActionException e) { + Throwables.rethrow(e.getCause()); + assert false : "always throws"; + return null; + } + } + } diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index afd8f429c9d84..5fb9b276bf657 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -66,14 +66,14 @@ dependencies { } api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" - api 'org.apache.avro:avro:1.11.2' + api 'org.apache.avro:avro:1.11.3' api 'com.google.code.gson:gson:2.10.1' runtimeOnly "com.google.guava:guava:${versions.guava}" api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.2' api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' - api 'org.apache.commons:commons-compress:1.21' + api "org.apache.commons:commons-compress:${versions.commonscompress}" api 'org.apache.commons:commons-configuration2:2.8.0' api 'commons-io:commons-io:2.12.0' api 'org.apache.commons:commons-lang3:3.13.0' @@ -84,7 +84,7 @@ dependencies { api 'net.minidev:json-smart:2.4.11' api "io.netty:netty-all:${versions.netty}" implementation "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" - implementation 'org.codehaus.woodstox:stax2-api:4.2.1' + implementation 'org.codehaus.woodstox:stax2-api:4.2.2' hdfsFixture project(':test:fixtures:hdfs-fixture') // Set the keytab files in the classpath so that we can access them from test code without the security manager diff --git a/plugins/repository-hdfs/licenses/avro-1.11.2.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.2.jar.sha1 deleted file mode 100644 index ce1a894e0ce6d..0000000000000 --- a/plugins/repository-hdfs/licenses/avro-1.11.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -97e62e8be2b37e849f1bdb5a4f08121d47cc9806 
\ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 new file mode 100644 index 0000000000000..fb43ecbcf22c9 --- /dev/null +++ b/plugins/repository-hdfs/licenses/avro-1.11.3.jar.sha1 @@ -0,0 +1 @@ +02b463409b373bff9ece09f54a43d42da5cea55a \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-compress-1.21.jar.sha1 b/plugins/repository-hdfs/licenses/commons-compress-1.21.jar.sha1 deleted file mode 100644 index 81ac609a1aa26..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-compress-1.21.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4ec95b60d4e86b5c95a0e919cb172a0af98011ef \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-compress-1.24.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-compress-1.24.0.jar.sha1 new file mode 100644 index 0000000000000..23999d1bfbde4 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-compress-1.24.0.jar.sha1 @@ -0,0 +1 @@ +b4b1b5a3d9573b2970fddab236102c0a4d27d35e \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-lang3-NOTICE.txt b/plugins/repository-hdfs/licenses/commons-lang3-NOTICE.txt index 8dfa22157abc3..13a3140897472 100644 --- a/plugins/repository-hdfs/licenses/commons-lang3-NOTICE.txt +++ b/plugins/repository-hdfs/licenses/commons-lang3-NOTICE.txt @@ -1,9 +1,5 @@ Apache Commons Lang -Copyright 2001-2015 The Apache Software Foundation +Copyright 2001-2019 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). - -This product includes software from the Spring Framework, -under the Apache License 2.0 (see: StringUtils.containsWhitespace()) - diff --git a/plugins/repository-hdfs/licenses/commons-logging-NOTICE.txt b/plugins/repository-hdfs/licenses/commons-logging-NOTICE.txt index 556bd03951d4b..d3d6e140ce4f3 100644 --- a/plugins/repository-hdfs/licenses/commons-logging-NOTICE.txt +++ b/plugins/repository-hdfs/licenses/commons-logging-NOTICE.txt @@ -3,4 +3,3 @@ Copyright 2003-2014 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). 
- diff --git a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.20.0.jar.sha1 b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.20.0.jar.sha1 deleted file mode 100644 index 800a4aa87ba0e..0000000000000 --- a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7ab4f082fd162f60afcaf2b8744a3d959feab3e8 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.21.0.jar.sha1 b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..0e22f98daa61c --- /dev/null +++ b/plugins/repository-hdfs/licenses/log4j-slf4j-impl-2.21.0.jar.sha1 @@ -0,0 +1 @@ +911fdb5b1a1df36719c579ecc6f2957b88bce1ab \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..a9aa34392903e --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +5ef15a3ce29a792b7ad17438e5f84c617b3f2993 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.96.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.96.Final.jar.sha1 deleted file mode 100644 index 32ced5451cfb6..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2145ec747511965e4a57099767654cf9083ce8a7 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 b/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 deleted file mode 100644 index 2c12704cdc560..0000000000000 --- a/plugins/repository-hdfs/licenses/stax2-api-4.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3f7325c52240418c2ba257b103c3c550e140c83 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 b/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 new file mode 100644 index 0000000000000..b15a7ead0d016 --- /dev/null +++ b/plugins/repository-hdfs/licenses/stax2-api-4.2.2.jar.sha1 @@ -0,0 +1 @@ +b0d746cadea928e5264f2ea294ea9a1bf815bbde \ No newline at end of file diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java index b28d28d76cfde..f0ffec5713c1d 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsRepository.java @@ -83,7 +83,7 @@ public HdfsRepository( final ClusterService clusterService, final RecoverySettings recoverySettings ) { - super(metadata, COMPRESS_SETTING.get(metadata.settings()), namedXContentRegistry, clusterService, recoverySettings); + super(metadata, namedXContentRegistry, clusterService, recoverySettings); this.environment = environment; this.chunkSize = metadata.settings().getAsBytesSize("chunk_size", null); diff --git a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java index 2e62ae48d1a06..6f390ffafa925 100644 --- a/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java +++ b/plugins/repository-hdfs/src/main/java/org/opensearch/repositories/hdfs/HdfsSecurityContext.java @@ -53,7 +53,7 @@ /** * Oversees all the 
security specific logic for the HDFS Repository plugin. - * + *
<p>
      * Keeps track of the current user for a given repository, as well as which * permissions to grant the blob store restricted execution methods. */ diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java index 0df39636b8ffa..6ff18b20036a8 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java @@ -66,4 +66,8 @@ protected Settings repositorySettings() { protected Collection> nodePlugins() { return Collections.singletonList(HdfsPlugin.class); } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9513") + @Override + public void testReadRange() {} } diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java index 2758bd020e979..856cdf1eb565e 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java @@ -42,7 +42,7 @@ * thread leaks out of the client and is picked up by the test framework. This thread filter is meant * to ignore the offending thread until a version of Hadoop is released that addresses the incorrect * interrupt handling. - * + *
<p>
      * In Hadoop 3.3.6, the org.apache.hadoop.fs.statistics.impl.EvaluatingStatisticsMap uses ForkJoinPool * to perform statistics calculation, leaving dangling workers. * diff --git a/plugins/repository-s3/licenses/commons-logging-LICENSE.txt b/plugins/repository-s3/licenses/commons-logging-LICENSE.txt index 57bc88a15a0ee..d645695673349 100644 --- a/plugins/repository-s3/licenses/commons-logging-LICENSE.txt +++ b/plugins/repository-s3/licenses/commons-logging-LICENSE.txt @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -199,4 +200,3 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - diff --git a/plugins/repository-s3/licenses/commons-logging-NOTICE.txt b/plugins/repository-s3/licenses/commons-logging-NOTICE.txt index 72eb32a902458..d3d6e140ce4f3 100644 --- a/plugins/repository-s3/licenses/commons-logging-NOTICE.txt +++ b/plugins/repository-s3/licenses/commons-logging-NOTICE.txt @@ -1,5 +1,5 @@ -Apache Commons CLI -Copyright 2001-2009 The Apache Software Foundation +Apache Commons Logging +Copyright 2003-2014 The Apache Software Foundation -This product includes software developed by +This product includes software developed at The Apache Software Foundation (http://www.apache.org/). diff --git a/plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 b/plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 deleted file mode 100644 index 9829576d38ce0..0000000000000 --- a/plugins/repository-s3/licenses/log4j-1.2-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -689151374756cb809cb029f2501015bdc7733179 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 b/plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..39d9177cb2fac --- /dev/null +++ b/plugins/repository-s3/licenses/log4j-1.2-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +12bad3819a9570807f3c97315930699584c12152 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..aaf2e35302d77 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +39b05d2d4027971bf99111a9be1d7035a116bb55 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.96.Final.jar.sha1 deleted file mode 100644 index 7abdb33dc79a2..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4b80fffbe77485b457bf844289bf1801f61b9e91 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..a77333ea8ae47 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.96.Final.jar.sha1 deleted file mode 100644 index 8fdb32be1de0b..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-9cfe430f8b14e7ba86969d8e1126aa0aae4d18f0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..6f26bf4e6a9b5 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +992623e7d8f2d96e41faf1687bb963f5433e3517 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.96.Final.jar.sha1 deleted file mode 100644 index dfb0cf39463e2..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a4d0d95df5026965c454902ef3d6d84b81f89626 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..bf5605151406e --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +cbf1a430ea44dbdedbcde16b185cbb95f28d72c7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 deleted file mode 100644 index 2fc787ee65197..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cc8baf4ff67c1bcc0cde60bc5c2bb9447d92d9e6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..d2ff72db60d1f --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +847f942381145de23f21c836d05b0677474271d3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.96.Final.jar.sha1 deleted file mode 100644 index 85b5f52749671..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d10c167623cbc471753f950846df241d1021655c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..f12a6046e96d0 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +4c0acdb8bb73647ebb3847ac2d503d53d72c02b4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.96.Final.jar.sha1 deleted file mode 100644 index fe4f48c68e78b..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7840d7523d709e02961b647546f9d9dde1699306 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..8e4179ba15942 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +fe62f9ccd41b8660d07639dbbab8ae1edd6f2720 \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/netty-resolver-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.96.Final.jar.sha1 deleted file mode 100644 index 9e93f013226cd..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0e51db5568a881e0f9b013b35617c597dc32f130 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..ab2819da570fd --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +6620fbfb47667a5eb6050e35c7b4c88000bcd77f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.96.Final.jar.sha1 deleted file mode 100644 index 707285d3d29c3..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dbd15ca244be28e1a98ed29b9d755edbfa737e02 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..5805fdaf411d1 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +78489936ca1d91483e34a31d04a3b0812386eb39 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.96.Final.jar.sha1 deleted file mode 100644 index 58564d9da4b27..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b0369501645f6e71f89ff7f77b5c5f52510a2e31 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..30d7758302e37 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +648ff5571022dbfa6789122e3872477bbf67fa7b \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 deleted file mode 100644 index e911c47d5ab1a..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -daf8578cade63a01525ee9d70371fa78e6e91094 \ No newline at end of file diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index 4bd67e66ebcbd..f00cda7bd36ec 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -38,7 +38,9 @@ import software.amazon.awssdk.core.internal.http.pipeline.stages.ApplyTransactionIdStage; import 
org.opensearch.action.ActionRunnable; +import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SuppressForbidden; @@ -57,13 +59,17 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryData; +import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.RepositoryStats; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; import org.opensearch.repositories.s3.utils.AwsRequestSigner; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotsService; import org.opensearch.snapshots.mockstore.BlobStoreWrapper; +import org.opensearch.test.BackgroundIndexer; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; @@ -73,12 +79,18 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.stream.StreamSupport; import fixture.s3.S3HttpHandler; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThan; @@ -216,6 +228,67 @@ public void testEnforcedCooldownPeriod() throws IOException { assertThat(repository.threadPool().relativeTimeInNanos() - beforeFastDelete, lessThan(TEST_COOLDOWN_PERIOD.getNanos())); } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10735") + @Override + public void testRequestStats() throws Exception { + final String repository = createRepository(randomName()); + final String index = "index-no-merges"; + createIndex( + index, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + + final long nbDocs = randomLongBetween(10_000L, 20_000L); + try (BackgroundIndexer indexer = new BackgroundIndexer(index, "_doc", client(), (int) nbDocs)) { + waitForDocs(nbDocs, indexer); + } + + flushAndRefresh(index); + ForceMergeResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); + assertThat(forceMerge.getSuccessfulShards(), equalTo(1)); + assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs); + + final String snapshot = "snapshot"; + assertSuccessfulSnapshot( + client().admin().cluster().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index) + ); + + assertAcked(client().admin().indices().prepareDelete(index)); + + assertSuccessfulRestore(client().admin().cluster().prepareRestoreSnapshot(repository, snapshot).setWaitForCompletion(true)); + ensureGreen(index); + assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs); + + 
assertAcked(client().admin().cluster().prepareDeleteSnapshot(repository, snapshot).get()); + + final RepositoryStats repositoryStats = StreamSupport.stream( + internalCluster().getInstances(RepositoriesService.class).spliterator(), + false + ).map(repositoriesService -> { + try { + return repositoriesService.repository(repository); + } catch (RepositoryMissingException e) { + return null; + } + }).filter(Objects::nonNull).map(Repository::stats).reduce(RepositoryStats::merge).get(); + + Map> extendedStats = repositoryStats.extendedStats; + Map aggregatedStats = new HashMap<>(); + extendedStats.forEach((k, v) -> { + if (k == BlobStore.Metric.RETRY_COUNT || k == BlobStore.Metric.REQUEST_SUCCESS || k == BlobStore.Metric.REQUEST_FAILURE) { + for (Map.Entry entry : v.entrySet()) { + aggregatedStats.merge(entry.getKey(), entry.getValue(), Math::addExact); + } + } + + }); + final Map mockCalls = getMockRequestCounts(); + + String assertionErrorMsg = String.format("SDK sent [%s] calls and handler measured [%s] calls", aggregatedStats, mockCalls); + + assertEquals(assertionErrorMsg, mockCalls, aggregatedStats); + } + /** * S3RepositoryPlugin that allows to disable chunked encoding and to set a low threshold between single upload and multipart upload. */ @@ -239,7 +312,7 @@ protected S3Repository createRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, false) { + return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, null, false) { @Override public BlobStore blobStore() { @@ -289,7 +362,7 @@ private void validateAuthHeader(HttpExchange exchange) { /** * HTTP handler that injects random S3 service errors - * + *

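The javadoc around this point (continued below) describes an HTTP handler that injects random S3 service errors; the handler implementation is not part of this diff. A minimal sketch of the idea, assuming a plain com.sun.net.httpserver delegate and a hypothetical error rate, might be:

import java.io.IOException;
import java.util.Random;

import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;

// Illustrative sketch: forwards most requests to the real mock-S3 handler, but
// occasionally answers 500 so SDK retry paths get exercised. As the javadoc
// warns, the error rate must stay low or the suite slows down noticeably.
final class RandomErrorHandlerSketch implements HttpHandler {
    private final HttpHandler delegate;
    private final Random random = new Random();
    private final double errorRate; // hypothetical, e.g. 0.05 for 5% failures

    RandomErrorHandlerSketch(HttpHandler delegate, double errorRate) {
        this.delegate = delegate;
        this.errorRate = errorRate;
    }

    @Override
    public void handle(HttpExchange exchange) throws IOException {
        if (random.nextDouble() < errorRate) {
            exchange.sendResponseHeaders(500, -1); // -1: no response body
            exchange.close();
        } else {
            delegate.handle(exchange);
        }
    }
}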
      * Note: it is not a good idea to allow this handler to simulate too many errors as it would * slow down the test suite. */ @@ -327,6 +400,8 @@ public void maybeTrack(final String request, Headers requestHeaders) { trackRequest("PutMultipartObject"); } else if (Regex.simpleMatch("PUT /*/*", request)) { trackRequest("PutObject"); + } else if (Regex.simpleMatch("POST /*?delete*", request)) { + trackRequest("DeleteObjects"); } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java index 0b5fcb6df280e..45170ea1ad209 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3Reference.java @@ -29,6 +29,7 @@ public class AmazonAsyncS3Reference extends RefCountedReleasable { client.client().close(); client.priorityClient().close(); + client.urgentClient().close(); AwsCredentialsProvider credentials = client.credentials(); if (credentials instanceof Closeable) { try { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java index fa2db83729d25..f8a313b55d945 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonAsyncS3WithCredentials.java @@ -19,16 +19,19 @@ final class AmazonAsyncS3WithCredentials { private final S3AsyncClient client; private final S3AsyncClient priorityClient; + private final S3AsyncClient urgentClient; private final AwsCredentialsProvider credentials; private AmazonAsyncS3WithCredentials( final S3AsyncClient client, final S3AsyncClient priorityClient, + final S3AsyncClient urgentClient, @Nullable final AwsCredentialsProvider credentials ) { this.client = client; this.credentials = credentials; this.priorityClient = priorityClient; + this.urgentClient = urgentClient; } S3AsyncClient client() { @@ -39,6 +42,10 @@ S3AsyncClient priorityClient() { return priorityClient; } + S3AsyncClient urgentClient() { + return urgentClient; + } + AwsCredentialsProvider credentials() { return credentials; } @@ -46,8 +53,9 @@ AwsCredentialsProvider credentials() { static AmazonAsyncS3WithCredentials create( final S3AsyncClient client, final S3AsyncClient priorityClient, + final S3AsyncClient urgentClient, @Nullable final AwsCredentialsProvider credentials ) { - return new AmazonAsyncS3WithCredentials(client, priorityClient, credentials); + return new AmazonAsyncS3WithCredentials(client, priorityClient, urgentClient, credentials); } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java index 08215ebdd45e0..262304029a0d3 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java @@ -103,6 +103,7 @@ public synchronized void refreshAndClearCache(Map clie */ public AmazonAsyncS3Reference client( RepositoryMetadata repositoryMetadata, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer 
normalExecutorBuilder ) { @@ -119,7 +120,7 @@ public AmazonAsyncS3Reference client( return existing; } final AmazonAsyncS3Reference clientReference = new AmazonAsyncS3Reference( - buildClient(clientSettings, priorityExecutorBuilder, normalExecutorBuilder) + buildClient(clientSettings, urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder) ); clientReference.incRef(); clientsCache = MapBuilder.newMapBuilder(clientsCache).put(clientSettings, clientReference).immutableMap(); @@ -165,6 +166,7 @@ S3ClientSettings settings(RepositoryMetadata repositoryMetadata) { // proxy for testing synchronized AmazonAsyncS3WithCredentials buildClient( final S3ClientSettings clientSettings, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { @@ -195,6 +197,17 @@ synchronized AmazonAsyncS3WithCredentials buildClient( builder.forcePathStyle(true); } + builder.httpClient(buildHttpClient(clientSettings, urgentExecutorBuilder.getAsyncTransferEventLoopGroup())); + builder.asyncConfiguration( + ClientAsyncConfiguration.builder() + .advancedOption( + SdkAdvancedAsyncClientOption.FUTURE_COMPLETION_EXECUTOR, + urgentExecutorBuilder.getFutureCompletionExecutor() + ) + .build() + ); + final S3AsyncClient urgentClient = SocketAccess.doPrivileged(builder::build); + builder.httpClient(buildHttpClient(clientSettings, priorityExecutorBuilder.getAsyncTransferEventLoopGroup())); builder.asyncConfiguration( ClientAsyncConfiguration.builder() @@ -217,7 +230,7 @@ synchronized AmazonAsyncS3WithCredentials buildClient( ); final S3AsyncClient client = SocketAccess.doPrivileged(builder::build); - return AmazonAsyncS3WithCredentials.create(client, priorityClient, credentials); + return AmazonAsyncS3WithCredentials.create(client, priorityClient, urgentClient, credentials); } static ClientOverrideConfiguration buildOverrideConfiguration(final S3ClientSettings clientSettings) { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index a97a509adce47..c1180aab0e0c7 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -32,6 +32,8 @@ package org.opensearch.repositories.s3; +import software.amazon.awssdk.core.ResponseInputStream; +import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.core.sync.RequestBody; import software.amazon.awssdk.services.s3.S3AsyncClient; @@ -44,10 +46,15 @@ import software.amazon.awssdk.services.s3.model.Delete; import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse; +import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest; +import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse; +import software.amazon.awssdk.services.s3.model.GetObjectRequest; +import software.amazon.awssdk.services.s3.model.GetObjectResponse; import software.amazon.awssdk.services.s3.model.HeadObjectRequest; import software.amazon.awssdk.services.s3.model.ListObjectsV2Request; import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; import software.amazon.awssdk.services.s3.model.NoSuchKeyException; +import 
software.amazon.awssdk.services.s3.model.ObjectAttributes; import software.amazon.awssdk.services.s3.model.ObjectIdentifier; import software.amazon.awssdk.services.s3.model.PutObjectRequest; import software.amazon.awssdk.services.s3.model.S3Error; @@ -63,22 +70,26 @@ import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; import org.opensearch.common.StreamContext; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStoreException; import org.opensearch.common.blobstore.DeleteResult; -import org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer; +import org.opensearch.common.blobstore.stream.read.ReadContext; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.support.AbstractBlobContainer; import org.opensearch.common.blobstore.support.PlainBlobMetadata; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.io.InputStreamContainer; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.repositories.s3.async.UploadRequest; +import org.opensearch.repositories.s3.utils.HttpRangeUtils; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -98,17 +109,10 @@ import static org.opensearch.repositories.s3.S3Repository.MAX_FILE_SIZE_USING_MULTIPART; import static org.opensearch.repositories.s3.S3Repository.MIN_PART_SIZE_USING_MULTIPART; -class S3BlobContainer extends AbstractBlobContainer implements VerifyingMultiStreamBlobContainer { +class S3BlobContainer extends AbstractBlobContainer implements AsyncMultiStreamBlobContainer { private static final Logger logger = LogManager.getLogger(S3BlobContainer.class); - /** - * Maximum number of deletes in a {@link DeleteObjectsRequest}. - * - * @see S3 Documentation. - */ - private static final int MAX_BULK_DELETES = 1000; - private final S3BlobStore blobStore; private final String keyPath; @@ -191,11 +195,16 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener comp StreamContext streamContext = SocketAccess.doPrivileged(() -> writeContext.getStreamProvider(partSize)); try (AmazonAsyncS3Reference amazonS3Reference = SocketAccess.doPrivileged(blobStore::asyncClientReference)) { - S3AsyncClient s3AsyncClient = writeContext.getWritePriority() == WritePriority.HIGH - ? 
amazonS3Reference.get().priorityClient() - : amazonS3Reference.get().client(); + S3AsyncClient s3AsyncClient; + if (writeContext.getWritePriority() == WritePriority.URGENT) { + s3AsyncClient = amazonS3Reference.get().urgentClient(); + } else if (writeContext.getWritePriority() == WritePriority.HIGH) { + s3AsyncClient = amazonS3Reference.get().priorityClient(); + } else { + s3AsyncClient = amazonS3Reference.get().client(); + } CompletableFuture completableFuture = blobStore.getAsyncTransferManager() - .uploadObject(s3AsyncClient, uploadRequest, streamContext); + .uploadObject(s3AsyncClient, uploadRequest, streamContext, blobStore.getStatsMetricPublisher()); completableFuture.whenComplete((response, throwable) -> { if (throwable == null) { completionListener.onResponse(response); @@ -211,6 +220,56 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener comp } } + @ExperimentalApi + @Override + public void readBlobAsync(String blobName, ActionListener listener) { + try (AmazonAsyncS3Reference amazonS3Reference = SocketAccess.doPrivileged(blobStore::asyncClientReference)) { + final S3AsyncClient s3AsyncClient = amazonS3Reference.get().client(); + final String bucketName = blobStore.bucket(); + final String blobKey = buildKey(blobName); + + final CompletableFuture blobMetadataFuture = getBlobMetadata(s3AsyncClient, bucketName, blobKey); + + blobMetadataFuture.whenComplete((blobMetadata, throwable) -> { + if (throwable != null) { + Exception ex = throwable.getCause() instanceof Exception + ? (Exception) throwable.getCause() + : new Exception(throwable.getCause()); + listener.onFailure(ex); + return; + } + + try { + final List blobPartInputStreamFutures = new ArrayList<>(); + final long blobSize = blobMetadata.objectSize(); + final Integer numberOfParts = blobMetadata.objectParts() == null ? null : blobMetadata.objectParts().totalPartsCount(); + final String blobChecksum = blobMetadata.checksum() == null ? null : blobMetadata.checksum().checksumCRC32(); + + if (numberOfParts == null) { + blobPartInputStreamFutures.add(() -> getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobKey, null)); + } else { + // S3 multipart files use 1 to n indexing + for (int partNumber = 1; partNumber <= numberOfParts; partNumber++) { + final int innerPartNumber = partNumber; + blobPartInputStreamFutures.add( + () -> getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobKey, innerPartNumber) + ); + } + } + listener.onResponse(new ReadContext(blobSize, blobPartInputStreamFutures, blobChecksum)); + } catch (Exception ex) { + listener.onFailure(ex); + } + }); + } catch (Exception ex) { + listener.onFailure(SdkException.create("Error occurred while fetching blob parts from the repository", ex)); + } + } + + public boolean remoteIntegrityCheckSupported() { + return true; + } + // package private for testing long getLargeBlobThresholdInBytes() { return blobStore.bufferSizeInBytes(); @@ -278,12 +337,12 @@ private void doDeleteBlobs(List blobNames, boolean relative) throws IOEx outstanding = new HashSet<>(blobNames); } try (AmazonS3Reference clientReference = blobStore.clientReference()) { - // S3 API only allows 1k blobs per delete so we split up the given blobs into requests of max. 
1k deletes + // S3 API allows 1k blobs per delete so we split up the given blobs into requests of bulk size deletes final List deleteRequests = new ArrayList<>(); final List partition = new ArrayList<>(); for (String key : outstanding) { partition.add(key); - if (partition.size() == MAX_BULK_DELETES) { + if (partition.size() == blobStore.getBulkDeletesSize()) { deleteRequests.add(bulkDelete(blobStore.bucket(), partition)); partition.clear(); } @@ -330,7 +389,7 @@ private void doDeleteBlobs(List blobNames, boolean relative) throws IOEx assert outstanding.isEmpty(); } - private static DeleteObjectsRequest bulkDelete(String bucket, List blobs) { + private DeleteObjectsRequest bulkDelete(String bucket, List blobs) { return DeleteObjectsRequest.builder() .bucket(bucket) .delete( @@ -339,21 +398,18 @@ private static DeleteObjectsRequest bulkDelete(String bucket, List blobs .quiet(true) .build() ) + .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().deleteObjectsMetricPublisher)) .build(); } @Override - public void listBlobsByPrefixInSortedOrder( - String blobNamePrefix, - int limit, - BlobNameSortOrder blobNameSortOrder, - ActionListener> listener - ) { + public List listBlobsByPrefixInSortedOrder(String blobNamePrefix, int limit, BlobNameSortOrder blobNameSortOrder) + throws IOException { // As AWS S3 returns list of keys in Lexicographic order, we don't have to fetch all the keys in order to sort them // We fetch only keys as per the given limit to optimize the fetch. If provided sort order is not Lexicographic, // we fall-back to default implementation of fetching all the keys and sorting them. if (blobNameSortOrder != BlobNameSortOrder.LEXICOGRAPHIC) { - super.listBlobsByPrefixInSortedOrder(blobNamePrefix, limit, blobNameSortOrder, listener); + return super.listBlobsByPrefixInSortedOrder(blobNamePrefix, limit, blobNameSortOrder); } else { if (limit < 0) { throw new IllegalArgumentException("limit should not be a negative value"); @@ -364,9 +420,9 @@ public void listBlobsByPrefixInSortedOrder( .flatMap(listing -> listing.contents().stream()) .map(s3Object -> new PlainBlobMetadata(s3Object.key().substring(keyPath.length()), s3Object.size())) .collect(Collectors.toList()); - listener.onResponse(blobs.subList(0, Math.min(limit, blobs.size()))); + return blobs.subList(0, Math.min(limit, blobs.size())); } catch (final Exception e) { - listener.onFailure(new IOException("Exception when listing blobs by prefix [" + prefix + "]", e)); + throw new IOException("Exception when listing blobs by prefix [" + prefix + "]", e); } } } @@ -627,4 +683,71 @@ static Tuple numberOfMultiparts(final long totalSize, final long par return Tuple.tuple(parts + 1, remaining); } } + + /** + * Fetches a part of the blob from the S3 bucket and transforms it to an {@link InputStreamContainer}, which holds + * the stream and its related metadata. + * @param s3AsyncClient Async client to be utilized to fetch the object part + * @param bucketName Name of the S3 bucket + * @param blobKey Identifier of the blob for which the parts will be fetched + * @param partNumber Optional part number for the blob to be retrieved + * @return A future of {@link InputStreamContainer} containing the stream and stream metadata. 
+ */ + CompletableFuture getBlobPartInputStreamContainer( + S3AsyncClient s3AsyncClient, + String bucketName, + String blobKey, + @Nullable Integer partNumber + ) { + final boolean isMultipartObject = partNumber != null; + final GetObjectRequest.Builder getObjectRequestBuilder = GetObjectRequest.builder().bucket(bucketName).key(blobKey); + + if (isMultipartObject) { + getObjectRequestBuilder.partNumber(partNumber); + } + + return SocketAccess.doPrivileged( + () -> s3AsyncClient.getObject(getObjectRequestBuilder.build(), AsyncResponseTransformer.toBlockingInputStream()) + .thenApply(response -> transformResponseToInputStreamContainer(response, isMultipartObject)) + ); + } + + /** + * Transforms the stream response object from S3 into an {@link InputStreamContainer} + * @param streamResponse Response stream object from S3 + * @param isMultipartObject Flag to denote a multipart object response + * @return {@link InputStreamContainer} containing the stream and stream metadata + */ + // Package-Private for testing. + static InputStreamContainer transformResponseToInputStreamContainer( + ResponseInputStream streamResponse, + boolean isMultipartObject + ) { + final GetObjectResponse getObjectResponse = streamResponse.response(); + final String contentRange = getObjectResponse.contentRange(); + final Long contentLength = getObjectResponse.contentLength(); + if ((isMultipartObject && contentRange == null) || contentLength == null) { + throw SdkException.builder().message("Failed to fetch required metadata for blob part").build(); + } + final long offset = isMultipartObject ? HttpRangeUtils.getStartOffsetFromRangeHeader(getObjectResponse.contentRange()) : 0L; + return new InputStreamContainer(streamResponse, getObjectResponse.contentLength(), offset); + } + + /** + * Retrieves the metadata like checksum, object size and parts for the provided blob within the S3 bucket. 
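transformResponseToInputStreamContainer above leans on HttpRangeUtils.getStartOffsetFromRangeHeader, whose body is not shown in this diff. Assuming the standard RFC 7233 Content-Range form, bytes <start>-<end>/<total>, a sketch of the start-offset extraction is:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

final class ContentRangeSketch {
    // Matches e.g. "bytes 4194304-8388607/16777216"; group 1 is the start offset.
    private static final Pattern CONTENT_RANGE = Pattern.compile("^bytes\\s+(\\d+)-(\\d+)/(\\d+|\\*)$");

    static long getStartOffset(final String contentRange) {
        final Matcher matcher = CONTENT_RANGE.matcher(contentRange);
        if (matcher.matches() == false) {
            throw new IllegalArgumentException("Unexpected Content-Range header: " + contentRange);
        }
        return Long.parseLong(matcher.group(1));
    }
}

For a single-part object the code above skips this path entirely and uses offset 0, since S3 returns no Content-Range header when no partNumber was requested.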
+ * @param s3AsyncClient Async client to be utilized to fetch the metadata + * @param bucketName Name of the S3 bucket + * @param blobName Identifier of the blob for which the metadata will be fetched + * @return A future containing the metadata within {@link GetObjectAttributesResponse} + */ + CompletableFuture getBlobMetadata(S3AsyncClient s3AsyncClient, String bucketName, String blobName) { + // Fetch blob metadata - part info, size, checksum + final GetObjectAttributesRequest getObjectAttributesRequest = GetObjectAttributesRequest.builder() + .bucket(bucketName) + .key(blobName) + .objectAttributes(ObjectAttributes.CHECKSUM, ObjectAttributes.OBJECT_SIZE, ObjectAttributes.OBJECT_PARTS) + .build(); + + return SocketAccess.doPrivileged(() -> s3AsyncClient.getObjectAttributes(getObjectAttributesRequest)); + } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java index 8a5b92d71bb45..e8e043357e126 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java @@ -47,9 +47,18 @@ import org.opensearch.repositories.s3.async.AsyncTransferManager; import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; import java.util.Locale; import java.util.Map; +import static org.opensearch.repositories.s3.S3Repository.BUCKET_SETTING; +import static org.opensearch.repositories.s3.S3Repository.BUFFER_SIZE_SETTING; +import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE; +import static org.opensearch.repositories.s3.S3Repository.CANNED_ACL_SETTING; +import static org.opensearch.repositories.s3.S3Repository.SERVER_SIDE_ENCRYPTION_SETTING; +import static org.opensearch.repositories.s3.S3Repository.STORAGE_CLASS_SETTING; + class S3BlobStore implements BlobStore { private static final Logger logger = LogManager.getLogger(S3BlobStore.class); @@ -58,21 +67,24 @@ class S3BlobStore implements BlobStore { private final S3AsyncService s3AsyncService; - private final String bucket; + private volatile String bucket; + + private volatile ByteSizeValue bufferSize; - private final ByteSizeValue bufferSize; + private volatile boolean serverSideEncryption; - private final boolean serverSideEncryption; + private volatile ObjectCannedACL cannedACL; - private final ObjectCannedACL cannedACL; + private volatile StorageClass storageClass; - private final StorageClass storageClass; + private volatile int bulkDeletesSize; - private final RepositoryMetadata repositoryMetadata; + private volatile RepositoryMetadata repositoryMetadata; private final StatsMetricPublisher statsMetricPublisher = new StatsMetricPublisher(); private final AsyncTransferManager asyncTransferManager; + private final AsyncExecutorContainer urgentExecutorBuilder; private final AsyncExecutorContainer priorityExecutorBuilder; private final AsyncExecutorContainer normalExecutorBuilder; private final boolean multipartUploadEnabled; @@ -86,8 +98,10 @@ class S3BlobStore implements BlobStore { ByteSizeValue bufferSize, String cannedACL, String storageClass, + int bulkDeletesSize, RepositoryMetadata repositoryMetadata, AsyncTransferManager asyncTransferManager, + AsyncExecutorContainer urgentExecutorBuilder, AsyncExecutorContainer priorityExecutorBuilder, AsyncExecutorContainer normalExecutorBuilder ) { @@ -99,14 +113,23 @@ class S3BlobStore implements BlobStore { 
this.bufferSize = bufferSize; this.cannedACL = initCannedACL(cannedACL); this.storageClass = initStorageClass(storageClass); + this.bulkDeletesSize = bulkDeletesSize; this.repositoryMetadata = repositoryMetadata; this.asyncTransferManager = asyncTransferManager; this.normalExecutorBuilder = normalExecutorBuilder; this.priorityExecutorBuilder = priorityExecutorBuilder; + this.urgentExecutorBuilder = urgentExecutorBuilder; } - public boolean isMultipartUploadEnabled() { - return multipartUploadEnabled; + @Override + public void reload(RepositoryMetadata repositoryMetadata) { + this.repositoryMetadata = repositoryMetadata; + this.bucket = BUCKET_SETTING.get(repositoryMetadata.settings()); + this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(repositoryMetadata.settings()); + this.bufferSize = BUFFER_SIZE_SETTING.get(repositoryMetadata.settings()); + this.cannedACL = initCannedACL(CANNED_ACL_SETTING.get(repositoryMetadata.settings())); + this.storageClass = initStorageClass(STORAGE_CLASS_SETTING.get(repositoryMetadata.settings())); + this.bulkDeletesSize = BULK_DELETE_SIZE.get(repositoryMetadata.settings()); } @Override @@ -119,7 +142,7 @@ public AmazonS3Reference clientReference() { } public AmazonAsyncS3Reference asyncClientReference() { - return s3AsyncService.client(repositoryMetadata, priorityExecutorBuilder, normalExecutorBuilder); + return s3AsyncService.client(repositoryMetadata, urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder); } int getMaxRetries() { @@ -138,6 +161,10 @@ public long bufferSizeInBytes() { return bufferSize.getBytes(); } + public int getBulkDeletesSize() { + return bulkDeletesSize; + } + @Override public BlobContainer blobContainer(BlobPath path) { return new S3BlobContainer(path, this); @@ -158,6 +185,16 @@ public Map stats() { return statsMetricPublisher.getStats().toMap(); } + @Override + public Map> extendedStats() { + if (statsMetricPublisher.getExtendedStats() == null || statsMetricPublisher.getExtendedStats().isEmpty()) { + return Collections.emptyMap(); + } + Map> extendedStats = new HashMap<>(); + statsMetricPublisher.getExtendedStats().forEach((k, v) -> extendedStats.put(k, v.toMap())); + return extendedStats; + } + public ObjectCannedACL getCannedACL() { return cannedACL; } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java index 933136228b1bb..2392c66329e06 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java @@ -446,7 +446,7 @@ S3ClientSettings refine(Settings repositorySettings) { /** * Load all client settings from the given settings. - * + *
<p>
      * Note this will always at least return a client named "default". */ static Map load(final Settings settings, final Path configPath) { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java index 3022d02084448..95cf5eca0f2f6 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Repository.java @@ -32,6 +32,9 @@ package org.opensearch.repositories.s3; +import software.amazon.awssdk.services.s3.model.ObjectCannedACL; +import software.amazon.awssdk.services.s3.model.StorageClass; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.LegacyESVersion; @@ -43,9 +46,11 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.BlobStoreException; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.SecureSetting; import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; @@ -67,7 +72,11 @@ import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; +import java.nio.file.Path; +import java.util.ArrayList; import java.util.Collection; +import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -83,7 +92,6 @@ *

<dt>{@code concurrent_streams}</dt><dd>Number of concurrent read/write streams (per repository on each node). Defaults to 5.</dd>
* <dt>{@code chunk_size}</dt>
* <dd>Large files can be divided into chunks. This parameter specifies the chunk size. Defaults to not chunked.</dd>
- * <dt>{@code compress}</dt><dd>If set to true metadata files will be stored compressed. Defaults to false.</dd>
      * */ class S3Repository extends MeteredBlobStoreRepository { @@ -188,6 +196,13 @@ class S3Repository extends MeteredBlobStoreRepository { new ByteSizeValue(5, ByteSizeUnit.TB) ); + /** + * Maximum number of deletes in a DeleteObjectsRequest. + * + * @see S3 Documentation. + */ + static final Setting BULK_DELETE_SIZE = Setting.intSetting("bulk_delete_size", 1000, 1, 1000); + /** * Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy, * standard_ia, onezone_ia and intelligent_tiering. Defaults to standard. @@ -227,21 +242,19 @@ class S3Repository extends MeteredBlobStoreRepository { private final S3Service service; - private final String bucket; - - private final ByteSizeValue bufferSize; + private volatile String bucket; - private final ByteSizeValue chunkSize; + private volatile ByteSizeValue bufferSize; - private final BlobPath basePath; + private volatile ByteSizeValue chunkSize; - private final boolean serverSideEncryption; + private volatile BlobPath basePath; - private final String storageClass; + private volatile boolean serverSideEncryption; - private final String cannedACL; + private volatile String storageClass; - private final RepositoryMetadata repositoryMetadata; + private volatile String cannedACL; /** * Time period to delay repository operations by after finalizing or deleting a snapshot. @@ -252,12 +265,14 @@ class S3Repository extends MeteredBlobStoreRepository { private final AsyncTransferManager asyncUploadUtils; private final S3AsyncService s3AsyncService; private final boolean multipartUploadEnabled; + private final AsyncExecutorContainer urgentExecutorBuilder; private final AsyncExecutorContainer priorityExecutorBuilder; private final AsyncExecutorContainer normalExecutorBuilder; + private final Path pluginConfigPath; - /** - * Constructs an s3 backed repository - */ + private volatile int bulkDeletesSize; + + // Used by test classes S3Repository( final RepositoryMetadata metadata, final NamedXContentRegistry namedXContentRegistry, @@ -265,84 +280,59 @@ class S3Repository extends MeteredBlobStoreRepository { final ClusterService clusterService, final RecoverySettings recoverySettings, final AsyncTransferManager asyncUploadUtils, + final AsyncExecutorContainer urgentExecutorBuilder, final AsyncExecutorContainer priorityExecutorBuilder, final AsyncExecutorContainer normalExecutorBuilder, final S3AsyncService s3AsyncService, final boolean multipartUploadEnabled ) { - super( + this( metadata, - COMPRESS_SETTING.get(metadata.settings()), namedXContentRegistry, + service, clusterService, recoverySettings, - buildLocation(metadata) + asyncUploadUtils, + urgentExecutorBuilder, + priorityExecutorBuilder, + normalExecutorBuilder, + s3AsyncService, + multipartUploadEnabled, + Path.of("") ); + } + + /** + * Constructs an s3 backed repository + */ + S3Repository( + final RepositoryMetadata metadata, + final NamedXContentRegistry namedXContentRegistry, + final S3Service service, + final ClusterService clusterService, + final RecoverySettings recoverySettings, + final AsyncTransferManager asyncUploadUtils, + final AsyncExecutorContainer urgentExecutorBuilder, + final AsyncExecutorContainer priorityExecutorBuilder, + final AsyncExecutorContainer normalExecutorBuilder, + final S3AsyncService s3AsyncService, + final boolean multipartUploadEnabled, + Path pluginConfigPath + ) { + super(metadata, namedXContentRegistry, clusterService, recoverySettings, buildLocation(metadata)); this.service = service; this.s3AsyncService = s3AsyncService; 
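The new bulk_delete_size setting above (default 1000, the S3 hard limit, with a floor of 1) is what doDeleteBlobs in S3BlobContainer consults when batching keys. Reduced to its core, the batching is plain fixed-size partitioning; a standalone sketch:

import java.util.ArrayList;
import java.util.List;

// Sketch of the batching in doDeleteBlobs: keys accumulate into partitions of at
// most bulkDeletesSize, and each partition then backs one DeleteObjectsRequest.
final class BulkDeletePartitionSketch {
    static List<List<String>> partition(final List<String> keys, final int bulkDeletesSize) {
        final List<List<String>> partitions = new ArrayList<>();
        List<String> current = new ArrayList<>();
        for (final String key : keys) {
            current.add(key);
            if (current.size() == bulkDeletesSize) {
                partitions.add(current);
                current = new ArrayList<>();
            }
        }
        if (current.isEmpty() == false) {
            partitions.add(current); // trailing partial batch
        }
        return partitions;
    }
}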
this.multipartUploadEnabled = multipartUploadEnabled; - - this.repositoryMetadata = metadata; + this.pluginConfigPath = pluginConfigPath; this.asyncUploadUtils = asyncUploadUtils; + this.urgentExecutorBuilder = urgentExecutorBuilder; this.priorityExecutorBuilder = priorityExecutorBuilder; this.normalExecutorBuilder = normalExecutorBuilder; - // Parse and validate the user's S3 Storage Class setting - this.bucket = BUCKET_SETTING.get(metadata.settings()); - if (bucket == null) { - throw new RepositoryException(metadata.name(), "No bucket defined for s3 repository"); - } - - this.bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); - this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); - - // We make sure that chunkSize is bigger or equal than/to bufferSize - if (this.chunkSize.getBytes() < bufferSize.getBytes()) { - throw new RepositoryException( - metadata.name(), - CHUNK_SIZE_SETTING.getKey() - + " (" - + this.chunkSize - + ") can't be lower than " - + BUFFER_SIZE_SETTING.getKey() - + " (" - + bufferSize - + ")." - ); - } - - final String basePath = BASE_PATH_SETTING.get(metadata.settings()); - if (Strings.hasLength(basePath)) { - this.basePath = new BlobPath().add(basePath); - } else { - this.basePath = BlobPath.cleanPath(); - } - - this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); - - this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); - this.cannedACL = CANNED_ACL_SETTING.get(metadata.settings()); - - if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { - // provided repository settings - deprecationLogger.deprecate( - "s3_repository_secret_settings", - "Using s3 access/secret key from repository settings. Instead " - + "store these in named clients and the opensearch keystore for secure settings." 
- ); - } + validateRepositoryMetadata(metadata); + readRepositoryMetadata(); coolDown = COOLDOWN_PERIOD.get(metadata.settings()); - - logger.debug( - "using bucket [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], cannedACL [{}], storageClass [{}]", - bucket, - chunkSize, - serverSideEncryption, - bufferSize, - cannedACL, - storageClass - ); } private static Map buildLocation(RepositoryMetadata metadata) { @@ -450,14 +440,16 @@ protected S3BlobStore createBlobStore() { bufferSize, cannedACL, storageClass, - repositoryMetadata, + bulkDeletesSize, + metadata, asyncUploadUtils, + urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder ); } - // only use for testing + // only use for testing (S3RepositoryTests) @Override protected BlobStore getBlobStore() { return super.getBlobStore(); @@ -468,11 +460,142 @@ public BlobPath basePath() { return basePath; } + @Override + public boolean isReloadable() { + return true; + } + + @Override + public void reload(RepositoryMetadata newRepositoryMetadata) { + if (isReloadable() == false) { + return; + } + + // Reload configs for S3Repository + super.reload(newRepositoryMetadata); + readRepositoryMetadata(); + + // Reload configs for S3RepositoryPlugin + service.settings(metadata); + s3AsyncService.settings(metadata); + + // Reload configs for S3BlobStore + BlobStore blobStore = getBlobStore(); + blobStore.reload(metadata); + } + + /** + * Reloads the values derived from the Repository Metadata + */ + private void readRepositoryMetadata() { + this.bucket = BUCKET_SETTING.get(metadata.settings()); + this.bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); + this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); + final String basePath = BASE_PATH_SETTING.get(metadata.settings()); + if (Strings.hasLength(basePath)) { + this.basePath = new BlobPath().add(basePath); + } else { + this.basePath = BlobPath.cleanPath(); + } + + this.serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); + this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); + this.cannedACL = CANNED_ACL_SETTING.get(metadata.settings()); + this.bulkDeletesSize = BULK_DELETE_SIZE.get(metadata.settings()); + if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { + // provided repository settings + deprecationLogger.deprecate( + "s3_repository_secret_settings", + "Using s3 access/secret key from repository settings. Instead " + + "store these in named clients and the opensearch keystore for secure settings." 
+ ); + } + + logger.debug( + "using bucket [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], cannedACL [{}], storageClass [{}]", + bucket, + chunkSize, + serverSideEncryption, + bufferSize, + cannedACL, + storageClass + ); + } + + @Override + public void validateMetadata(RepositoryMetadata newRepositoryMetadata) { + super.validateMetadata(newRepositoryMetadata); + validateRepositoryMetadata(newRepositoryMetadata); + } + + private void validateRepositoryMetadata(RepositoryMetadata newRepositoryMetadata) { + Settings settings = newRepositoryMetadata.settings(); + if (BUCKET_SETTING.get(settings) == null) { + throw new RepositoryException(newRepositoryMetadata.name(), "No bucket defined for s3 repository"); + } + + // We make sure that chunkSize is bigger or equal than/to bufferSize + if (CHUNK_SIZE_SETTING.get(settings).getBytes() < BUFFER_SIZE_SETTING.get(settings).getBytes()) { + throw new RepositoryException( + newRepositoryMetadata.name(), + CHUNK_SIZE_SETTING.getKey() + + " (" + + CHUNK_SIZE_SETTING.get(settings) + + ") can't be lower than " + + BUFFER_SIZE_SETTING.getKey() + + " (" + + BUFFER_SIZE_SETTING.get(settings) + + ")." + ); + } + + validateStorageClass(STORAGE_CLASS_SETTING.get(settings)); + validateCannedACL(CANNED_ACL_SETTING.get(settings)); + } + + private static void validateStorageClass(String storageClassStringValue) { + if ((storageClassStringValue == null) || storageClassStringValue.equals("")) { + return; + } + + final StorageClass storageClass = StorageClass.fromValue(storageClassStringValue.toUpperCase(Locale.ENGLISH)); + if (storageClass.equals(StorageClass.GLACIER)) { + throw new BlobStoreException("Glacier storage class is not supported"); + } + + if (storageClass == StorageClass.UNKNOWN_TO_SDK_VERSION) { + throw new BlobStoreException("`" + storageClassStringValue + "` is not a valid S3 Storage Class."); + } + } + + private static void validateCannedACL(String cannedACLStringValue) { + if ((cannedACLStringValue == null) || cannedACLStringValue.equals("")) { + return; + } + + for (final ObjectCannedACL cur : ObjectCannedACL.values()) { + if (cur.toString().equalsIgnoreCase(cannedACLStringValue)) { + return; + } + } + + throw new BlobStoreException("cannedACL is not valid: [" + cannedACLStringValue + "]"); + } + @Override protected ByteSizeValue chunkSize() { return chunkSize; } + @Override + public List> getRestrictedSystemRepositorySettings() { + List> restrictedSettings = new ArrayList<>(); + restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings()); + restrictedSettings.add(BUCKET_SETTING); + restrictedSettings.add(BASE_PATH_SETTING); + return restrictedSettings; + } + @Override protected void doClose() { final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java index 6ef60474afe8c..9ed232464d080 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import 
org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -55,6 +56,7 @@ import org.opensearch.script.ScriptService; import org.opensearch.threadpool.ExecutorBuilder; import org.opensearch.threadpool.FixedExecutorBuilder; +import org.opensearch.threadpool.ScalingExecutorBuilder; import org.opensearch.threadpool.ThreadPool; import org.opensearch.watcher.ResourceWatcherService; @@ -73,6 +75,9 @@ * A plugin to add a repository type that writes to and from the AWS S3. */ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin { + + private static final String URGENT_FUTURE_COMPLETION = "urgent_future_completion"; + private static final String URGENT_STREAM_READER = "urgent_stream_reader"; private static final String PRIORITY_FUTURE_COMPLETION = "priority_future_completion"; private static final String PRIORITY_STREAM_READER = "priority_stream_reader"; private static final String FUTURE_COMPLETION = "future_completion"; @@ -83,6 +88,7 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo private final Path configPath; + private AsyncExecutorContainer urgentExecutorBuilder; private AsyncExecutorContainer priorityExecutorBuilder; private AsyncExecutorContainer normalExecutorBuilder; @@ -93,17 +99,25 @@ public S3RepositoryPlugin(final Settings settings, final Path configPath) { @Override public List> getExecutorBuilders(Settings settings) { List> executorBuilders = new ArrayList<>(); + int halfProcMaxAt5 = halfAllocatedProcessorsMaxFive(allocatedProcessors(settings)); executorBuilders.add( - new FixedExecutorBuilder(settings, PRIORITY_FUTURE_COMPLETION, priorityPoolCount(settings), 10_000, PRIORITY_FUTURE_COMPLETION) + new FixedExecutorBuilder(settings, URGENT_FUTURE_COMPLETION, urgentPoolCount(settings), 10_000, URGENT_FUTURE_COMPLETION) ); + executorBuilders.add(new ScalingExecutorBuilder(URGENT_STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); executorBuilders.add( - new FixedExecutorBuilder(settings, PRIORITY_STREAM_READER, priorityPoolCount(settings), 10_000, PRIORITY_STREAM_READER) + new FixedExecutorBuilder(settings, PRIORITY_FUTURE_COMPLETION, priorityPoolCount(settings), 10_000, PRIORITY_FUTURE_COMPLETION) ); + executorBuilders.add(new ScalingExecutorBuilder(PRIORITY_STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); + executorBuilders.add(new FixedExecutorBuilder(settings, FUTURE_COMPLETION, normalPoolCount(settings), 10_000, FUTURE_COMPLETION)); - executorBuilders.add(new FixedExecutorBuilder(settings, STREAM_READER, normalPoolCount(settings), 10_000, STREAM_READER)); + executorBuilders.add(new ScalingExecutorBuilder(STREAM_READER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5))); return executorBuilders; } + static int halfAllocatedProcessorsMaxFive(final int allocatedProcessors) { + return boundedBy((allocatedProcessors + 1) / 2, 1, 5); + } + S3RepositoryPlugin(final Settings settings, final Path configPath, final S3Service service, final S3AsyncService s3AsyncService) { this.service = Objects.requireNonNull(service, "S3 service must not be null"); this.configPath = configPath; @@ -122,6 +136,10 @@ private static int allocatedProcessors(Settings settings) { return OpenSearchExecutors.allocatedProcessors(settings); } + private static int urgentPoolCount(Settings settings) { + return boundedBy((allocatedProcessors(settings) + 7) / 8, 1, 2); + } + private static int priorityPoolCount(Settings settings) { return 
boundedBy((allocatedProcessors(settings) + 1) / 2, 2, 4); } @@ -144,8 +162,14 @@ public Collection createComponents( final IndexNameExpressionResolver expressionResolver, final Supplier repositoriesServiceSupplier ) { + int urgentEventLoopThreads = urgentPoolCount(clusterService.getSettings()); int priorityEventLoopThreads = priorityPoolCount(clusterService.getSettings()); int normalEventLoopThreads = normalPoolCount(clusterService.getSettings()); + this.urgentExecutorBuilder = new AsyncExecutorContainer( + threadPool.executor(URGENT_FUTURE_COMPLETION), + threadPool.executor(URGENT_STREAM_READER), + new AsyncTransferEventLoopGroup(urgentEventLoopThreads) + ); this.priorityExecutorBuilder = new AsyncExecutorContainer( threadPool.executor(PRIORITY_FUTURE_COMPLETION), threadPool.executor(PRIORITY_STREAM_READER), @@ -170,7 +194,8 @@ protected S3Repository createRepository( AsyncTransferManager asyncUploadUtils = new AsyncTransferManager( S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.get(clusterService.getSettings()).getBytes(), normalExecutorBuilder.getStreamReader(), - priorityExecutorBuilder.getStreamReader() + priorityExecutorBuilder.getStreamReader(), + urgentExecutorBuilder.getStreamReader() ); return new S3Repository( metadata, @@ -179,10 +204,12 @@ protected S3Repository createRepository( clusterService, recoverySettings, asyncUploadUtils, + urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder, s3AsyncService, - S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING.get(clusterService.getSettings()) + S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING.get(clusterService.getSettings()), + configPath ); } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java index 50f7115cd03b6..d7e47e0ab1bcc 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java @@ -40,8 +40,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; -import org.opensearch.common.collect.Tuple; import org.opensearch.common.util.io.IOUtils; import org.opensearch.repositories.s3.utils.HttpRangeUtils; @@ -55,8 +53,8 @@ /** * Wrapper around an S3 object that will retry the {@link GetObjectRequest} if the download fails part-way through, resuming from where * the failure occurred. This should be handled by the SDK but it isn't today. This should be revisited in the future (e.g. before removing - * the {@link LegacyESVersion#V_7_0_0} version constant) and removed when the SDK handles retries itself. - * + * the {@code LegacyESVersion#V_7_0_0} version constant) and removed when the SDK handles retries itself. + *
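The pool-count helpers above reduce to simple clamped divisions. Assuming boundedBy is the usual min/max clamp (its definition sits outside this hunk), the sizes they yield are easy to tabulate:

// Assumed clamp matching how boundedBy is used above; not part of this diff.
final class PoolSizingSketch {
    static int boundedBy(final int value, final int min, final int max) {
        return Math.min(max, Math.max(min, value));
    }

    public static void main(String[] args) {
        for (final int procs : new int[] { 1, 2, 8, 16, 32 }) {
            final int urgent = boundedBy((procs + 7) / 8, 1, 2);    // urgentPoolCount
            final int priority = boundedBy((procs + 1) / 2, 2, 4);  // priorityPoolCount
            final int scaling = boundedBy((procs + 1) / 2, 1, 5);   // halfAllocatedProcessorsMaxFive
            System.out.printf("procs=%d urgent=%d priority=%d scalingMax=%d%n", procs, urgent, priority, scaling);
        }
    }
}

So the urgent pools stay tiny (one or two threads) even on large hosts, while the scaling stream-reader pools top out at five threads.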

      * See https://github.com/aws/aws-sdk-java/issues/856 for the related SDK issue */ class S3RetryingInputStream extends InputStream { @@ -121,7 +119,7 @@ private void openStream() throws IOException { ); this.currentStreamLastOffset = Math.addExact( Math.addExact(start, currentOffset), - getStreamLength(getObjectResponseInputStream.response()) + getObjectResponseInputStream.response().contentLength() ); this.currentStream = getObjectResponseInputStream; this.isStreamAborted.set(false); @@ -135,29 +133,6 @@ private void openStream() throws IOException { } } - private long getStreamLength(final GetObjectResponse getObjectResponse) { - try { - // Returns the content range of the object if response contains the Content-Range header. - if (getObjectResponse.contentRange() != null) { - final Tuple s3ResponseRange = HttpRangeUtils.fromHttpRangeHeader(getObjectResponse.contentRange()); - assert s3ResponseRange.v2() >= s3ResponseRange.v1() : s3ResponseRange.v2() + " vs " + s3ResponseRange.v1(); - assert s3ResponseRange.v1() == start + currentOffset : "Content-Range start value [" - + s3ResponseRange.v1() - + "] exceeds start [" - + start - + "] + current offset [" - + currentOffset - + ']'; - assert s3ResponseRange.v2() == end : "Content-Range end value [" + s3ResponseRange.v2() + "] exceeds end [" + end + ']'; - return s3ResponseRange.v2() - s3ResponseRange.v1() + 1L; - } - return getObjectResponse.contentLength(); - } catch (Exception e) { - assert false : e; - return Long.MAX_VALUE - 1L; // assume a large stream so that the underlying stream is aborted on closing, unless eof is reached - } - } - @Override public int read() throws IOException { ensureOpen(); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java index b13672b4179f8..b1b3e19eac275 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java @@ -90,6 +90,7 @@ import java.security.SecureRandom; import java.time.Duration; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import static java.util.Collections.emptyMap; @@ -100,7 +101,7 @@ class S3Service implements Closeable { private static final String DEFAULT_S3_ENDPOINT = "s3.amazonaws.com"; - private volatile Map clientsCache = emptyMap(); + private volatile Map clientsCache = new ConcurrentHashMap<>(); /** * Client settings calculated from static configuration and settings in the keystore. @@ -111,7 +112,7 @@ class S3Service implements Closeable { * Client settings derived from those in {@link #staticClientSettings} by combining them with settings * in the {@link RepositoryMetadata}. 
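The removal works because for a ranged GET the SDK reports the length of the returned range as the response's `contentLength`, so the Content-Range parsing is redundant. A minimal sketch of the offset bookkeeping (all values are illustrative assumptions):

```java
// Sketch: after resuming a ranged GET, the exclusive end offset of the
// current stream is simply start + currentOffset + contentLength.
public class OffsetSketch {
    public static void main(String[] args) {
        long start = 0L;             // assumed first byte of the requested blob range
        long currentOffset = 4096L;  // assumed bytes already consumed before the retry
        long contentLength = 1024L;  // assumed Content-Length of the resumed ranged response
        long currentStreamLastOffset = Math.addExact(Math.addExact(start, currentOffset), contentLength);
        System.out.println(currentStreamLastOffset); // prints 5120
    }
}
```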
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java
index b13672b4179f8..b1b3e19eac275 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java
@@ -90,6 +90,7 @@
 import java.security.SecureRandom;
 import java.time.Duration;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 
 import static java.util.Collections.emptyMap;
 
@@ -100,7 +101,7 @@ class S3Service implements Closeable {
 
     private static final String DEFAULT_S3_ENDPOINT = "s3.amazonaws.com";
 
-    private volatile Map<S3ClientSettings, AmazonS3Reference> clientsCache = emptyMap();
+    private volatile Map<S3ClientSettings, AmazonS3Reference> clientsCache = new ConcurrentHashMap<>();
 
     /**
      * Client settings calculated from static configuration and settings in the keystore.
@@ -111,7 +112,7 @@ class S3Service implements Closeable {
      * Client settings derived from those in {@link #staticClientSettings} by combining them with settings
      * in the {@link RepositoryMetadata}.
      */
-    private volatile Map<Settings, S3ClientSettings> derivedClientSettings = emptyMap();
+    private volatile Map<Settings, S3ClientSettings> derivedClientSettings = new ConcurrentHashMap<>();
 
     S3Service(final Path configPath) {
         staticClientSettings = MapBuilder.newMapBuilder()
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java
index cad0037f99249..0c63bfdb1ff97 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java
@@ -8,10 +8,13 @@
 
 package org.opensearch.repositories.s3;
 
-import software.amazon.awssdk.http.HttpMetric;
 import software.amazon.awssdk.metrics.MetricCollection;
 import software.amazon.awssdk.metrics.MetricPublisher;
+import software.amazon.awssdk.metrics.MetricRecord;
+
+import org.opensearch.common.blobstore.BlobStore;
 
+import java.time.Duration;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
@@ -20,18 +23,67 @@ public class StatsMetricPublisher {
 
     private final Stats stats = new Stats();
 
+    private final Map<BlobStore.Metric, Stats> extendedStats = new HashMap<>() {
+        {
+            put(BlobStore.Metric.REQUEST_LATENCY, new Stats());
+            put(BlobStore.Metric.REQUEST_SUCCESS, new Stats());
+            put(BlobStore.Metric.REQUEST_FAILURE, new Stats());
+            put(BlobStore.Metric.RETRY_COUNT, new Stats());
+        }
+    };
+
     public MetricPublisher listObjectsMetricPublisher = new MetricPublisher() {
         @Override
         public void publish(MetricCollection metricCollection) {
-            stats.listCount.addAndGet(
-                metricCollection.children()
-                    .stream()
-                    .filter(
-                        metricRecords -> metricRecords.name().equals("ApiCallAttempt")
-                            && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty()
-                    )
-                    .count()
-            );
+            for (MetricRecord<?> metricRecord : metricCollection) {
+                switch (metricRecord.metric().name()) {
+                    case "ApiCallDuration":
+                        extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).listMetrics.addAndGet(
+                            ((Duration) metricRecord.value()).toMillis()
+                        );
+                        break;
+                    case "RetryCount":
+                        extendedStats.get(BlobStore.Metric.RETRY_COUNT).listMetrics.addAndGet(((Integer) metricRecord.value()));
+                        break;
+                    case "ApiCallSuccessful":
+                        if ((Boolean) metricRecord.value()) {
+                            extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).listMetrics.addAndGet(1);
+                        } else {
+                            extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).listMetrics.addAndGet(1);
+                        }
+                        stats.listMetrics.addAndGet(1);
+                        break;
+                }
+            }
         }
 
+        @Override
+        public void close() {}
+    };
+
+    public MetricPublisher deleteObjectsMetricPublisher = new MetricPublisher() {
+        @Override
+        public void publish(MetricCollection metricCollection) {
+            for (MetricRecord<?> metricRecord : metricCollection) {
+                switch (metricRecord.metric().name()) {
+                    case "ApiCallDuration":
+                        extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).deleteMetrics.addAndGet(
+                            ((Duration) metricRecord.value()).toMillis()
+                        );
+                        break;
+                    case "RetryCount":
+                        extendedStats.get(BlobStore.Metric.RETRY_COUNT).deleteMetrics.addAndGet(((Integer) metricRecord.value()));
+                        break;
+                    case "ApiCallSuccessful":
+                        if ((Boolean) metricRecord.value()) {
+                            extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).deleteMetrics.addAndGet(1);
+                        } else {
+                            extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).deleteMetrics.addAndGet(1);
+                        }
+                        stats.deleteMetrics.addAndGet(1);
+                        break;
+                }
+            }
         }
 
         @Override
@@ -41,15 +93,26 @@ public void close() {}
 
     public MetricPublisher
getObjectMetricPublisher = new MetricPublisher() {
         @Override
         public void publish(MetricCollection metricCollection) {
-            stats.getCount.addAndGet(
-                metricCollection.children()
-                    .stream()
-                    .filter(
-                        metricRecords -> metricRecords.name().equals("ApiCallAttempt")
-                            && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty()
-                    )
-                    .count()
-            );
+            for (MetricRecord<?> metricRecord : metricCollection) {
+                switch (metricRecord.metric().name()) {
+                    case "ApiCallDuration":
+                        extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).getMetrics.addAndGet(
+                            ((Duration) metricRecord.value()).toMillis()
+                        );
+                        break;
+                    case "RetryCount":
+                        extendedStats.get(BlobStore.Metric.RETRY_COUNT).getMetrics.addAndGet(((Integer) metricRecord.value()));
+                        break;
+                    case "ApiCallSuccessful":
+                        if ((Boolean) metricRecord.value()) {
+                            extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).getMetrics.addAndGet(1);
+                        } else {
+                            extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).getMetrics.addAndGet(1);
+                        }
+                        stats.getMetrics.addAndGet(1);
+                        break;
+                }
+            }
         }
 
         @Override
@@ -59,15 +122,26 @@ public void close() {}
 
     public MetricPublisher putObjectMetricPublisher = new MetricPublisher() {
         @Override
         public void publish(MetricCollection metricCollection) {
-            stats.putCount.addAndGet(
-                metricCollection.children()
-                    .stream()
-                    .filter(
-                        metricRecords -> metricRecords.name().equals("ApiCallAttempt")
-                            && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty()
-                    )
-                    .count()
-            );
+            for (MetricRecord<?> metricRecord : metricCollection) {
+                switch (metricRecord.metric().name()) {
+                    case "ApiCallDuration":
+                        extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).putMetrics.addAndGet(
+                            ((Duration) metricRecord.value()).toMillis()
+                        );
+                        break;
+                    case "RetryCount":
+                        extendedStats.get(BlobStore.Metric.RETRY_COUNT).putMetrics.addAndGet(((Integer) metricRecord.value()));
+                        break;
+                    case "ApiCallSuccessful":
+                        if ((Boolean) metricRecord.value()) {
+                            extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).putMetrics.addAndGet(1);
+                        } else {
+                            extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).putMetrics.addAndGet(1);
+                        }
+                        stats.putMetrics.addAndGet(1);
+                        break;
+                }
+            }
         }
 
         @Override
@@ -77,15 +151,26 @@ public void close() {}
 
     public MetricPublisher multipartUploadMetricCollector = new MetricPublisher() {
         @Override
         public void publish(MetricCollection metricCollection) {
-            stats.postCount.addAndGet(
-                metricCollection.children()
-                    .stream()
-                    .filter(
-                        metricRecords -> metricRecords.name().equals("ApiCallAttempt")
-                            && !metricRecords.metricValues(HttpMetric.HTTP_STATUS_CODE).isEmpty()
-                    )
-                    .count()
-            );
+            for (MetricRecord<?> metricRecord : metricCollection) {
+                switch (metricRecord.metric().name()) {
+                    case "ApiCallDuration":
+                        extendedStats.get(BlobStore.Metric.REQUEST_LATENCY).multiPartPutMetrics.addAndGet(
+                            ((Duration) metricRecord.value()).toMillis()
+                        );
+                        break;
+                    case "RetryCount":
+                        extendedStats.get(BlobStore.Metric.RETRY_COUNT).multiPartPutMetrics.addAndGet(((Integer) metricRecord.value()));
+                        break;
+                    case "ApiCallSuccessful":
+                        if ((Boolean) metricRecord.value()) {
+                            extendedStats.get(BlobStore.Metric.REQUEST_SUCCESS).multiPartPutMetrics.addAndGet(1);
+                        } else {
+                            extendedStats.get(BlobStore.Metric.REQUEST_FAILURE).multiPartPutMetrics.addAndGet(1);
+                        }
+                        stats.multiPartPutMetrics.addAndGet(1);
+                        break;
+                }
+            }
         }
 
         @Override
@@ -96,22 +181,29 @@ public Stats getStats() {
         return stats;
     }
 
+    public Map<BlobStore.Metric, Stats> getExtendedStats() {
+        return extendedStats;
+    }
+
     static class Stats {
 
-        final AtomicLong listCount = new AtomicLong();
+        final AtomicLong listMetrics = new AtomicLong();
+
+        final AtomicLong getMetrics = new AtomicLong();
 
-        final AtomicLong getCount = new AtomicLong();
+        final AtomicLong putMetrics = new AtomicLong();
 
-        final AtomicLong putCount = new AtomicLong();
+        final AtomicLong deleteMetrics = new AtomicLong();
 
-        final AtomicLong postCount = new AtomicLong();
+        final AtomicLong multiPartPutMetrics = new AtomicLong();
 
         Map<String, Long> toMap() {
             final Map<String, Long> results = new HashMap<>();
-            results.put("GetObject", getCount.get());
-            results.put("ListObjects", listCount.get());
-            results.put("PutObject", putCount.get());
-            results.put("PutMultipartObject", postCount.get());
+            results.put("GetObject", getMetrics.get());
+            results.put("ListObjects", listMetrics.get());
+            results.put("PutObject", putMetrics.get());
+            results.put("DeleteObjects", deleteMetrics.get());
+            results.put("PutMultipartObject", multiPartPutMetrics.get());
             return results;
         }
     }
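To make the folding concrete: each publisher walks the SDK's per-call metric records and buckets them by name. A standalone sketch of how one successful GetObject call's records land in the counters; the record names mirror the AWS SDK core metrics consumed above ("ApiCallDuration", "RetryCount", "ApiCallSuccessful"), and the values are illustrative:

```java
import java.time.Duration;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical, self-contained version of the switch above (not the plugin class).
public class StatsFoldSketch {
    public static void main(String[] args) {
        AtomicLong latencyMs = new AtomicLong();  // REQUEST_LATENCY bucket
        AtomicLong successes = new AtomicLong();  // REQUEST_SUCCESS bucket
        AtomicLong failures = new AtomicLong();   // REQUEST_FAILURE bucket
        AtomicLong retries = new AtomicLong();    // RETRY_COUNT bucket
        AtomicLong requests = new AtomicLong();   // legacy per-operation count

        Map<String, Object> records = new LinkedHashMap<>();
        records.put("ApiCallDuration", Duration.ofMillis(42));
        records.put("RetryCount", 1);
        records.put("ApiCallSuccessful", true);

        for (Map.Entry<String, Object> record : records.entrySet()) {
            switch (record.getKey()) {
                case "ApiCallDuration":
                    latencyMs.addAndGet(((Duration) record.getValue()).toMillis());
                    break;
                case "RetryCount":
                    retries.addAndGet((Integer) record.getValue());
                    break;
                case "ApiCallSuccessful":
                    if ((Boolean) record.getValue()) {
                        successes.incrementAndGet();
                    } else {
                        failures.incrementAndGet();
                    }
                    requests.incrementAndGet();
                    break;
            }
        }
        // Expected: latencyMs=42 successes=1 failures=0 retries=1 requests=1
        System.out.printf("latencyMs=%d successes=%d failures=%d retries=%d requests=%d%n",
            latencyMs.get(), successes.get(), failures.get(), retries.get(), requests.get());
    }
}
```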
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java
index ad6939ce299d6..933ee6dc29513 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncPartsHandler.java
@@ -23,10 +23,13 @@
 import org.opensearch.common.StreamContext;
 import org.opensearch.common.blobstore.stream.write.WritePriority;
 import org.opensearch.common.io.InputStreamContainer;
+import org.opensearch.core.common.unit.ByteSizeUnit;
 import org.opensearch.repositories.s3.SocketAccess;
 import org.opensearch.repositories.s3.io.CheckedContainer;
 
+import java.io.BufferedInputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
@@ -45,6 +48,7 @@ public class AsyncPartsHandler {
      * @param s3AsyncClient S3 client to use for upload
      * @param executorService Thread pool for regular upload
      * @param priorityExecutorService Thread pool for priority uploads
+     * @param urgentExecutorService Thread pool for urgent uploads
      * @param uploadRequest request for upload
      * @param streamContext Stream context used in supplying individual file parts
      * @param uploadId Upload Id against which multi-part is being performed
@@ -57,6 +61,7 @@ public static List<CompletableFuture<CompletedPart>> uploadParts(
         S3AsyncClient s3AsyncClient,
         ExecutorService executorService,
         ExecutorService priorityExecutorService,
+        ExecutorService urgentExecutorService,
         UploadRequest uploadRequest,
         StreamContext streamContext,
         String uploadId,
@@ -80,6 +85,7 @@ public static List<CompletableFuture<CompletedPart>> uploadParts(
                 s3AsyncClient,
                 executorService,
                 priorityExecutorService,
+                urgentExecutorService,
                 completedParts,
                 inputStreamContainers,
                 futures,
@@ -126,6 +132,7 @@ private static void uploadPart(
         S3AsyncClient s3AsyncClient,
         ExecutorService executorService,
         ExecutorService priorityExecutorService,
+        ExecutorService urgentExecutorService,
         AtomicReferenceArray<CompletedPart> completedParts,
         AtomicReferenceArray<CheckedContainer> inputStreamContainers,
         List<CompletableFuture<CompletedPart>> futures,
@@ -135,29 +142,47 @@ private static void uploadPart(
     ) {
         Integer partNumber = uploadPartRequest.partNumber();
 
-        ExecutorService streamReadExecutor = uploadRequest.getWritePriority() == WritePriority.HIGH
-            ? priorityExecutorService
-            : executorService;
+        ExecutorService streamReadExecutor;
+        if (uploadRequest.getWritePriority() == WritePriority.URGENT) {
+            streamReadExecutor = urgentExecutorService;
+        } else if (uploadRequest.getWritePriority() == WritePriority.HIGH) {
+            streamReadExecutor = priorityExecutorService;
+        } else {
+            streamReadExecutor = executorService;
+        }
+
+        // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered
+        // data can be retried instead of retrying whole file by the application.
+        InputStream inputStream = new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1));
         CompletableFuture<UploadPartResponse> uploadPartResponseFuture = SocketAccess.doPrivileged(
             () -> s3AsyncClient.uploadPart(
                 uploadPartRequest,
-                AsyncRequestBody.fromInputStream(
-                    inputStreamContainer.getInputStream(),
-                    inputStreamContainer.getContentLength(),
-                    streamReadExecutor
-                )
+                AsyncRequestBody.fromInputStream(inputStream, inputStreamContainer.getContentLength(), streamReadExecutor)
             )
         );
 
-        CompletableFuture<CompletedPart> convertFuture = uploadPartResponseFuture.thenApply(
-            uploadPartResponse -> convertUploadPartResponse(
-                completedParts,
-                inputStreamContainers,
-                uploadPartResponse,
-                partNumber,
-                uploadRequest.doRemoteDataIntegrityCheck()
-            )
-        );
+        CompletableFuture<CompletedPart> convertFuture = uploadPartResponseFuture.whenComplete((resp, throwable) -> {
+            try {
+                inputStream.close();
+            } catch (IOException ex) {
+                log.error(
+                    () -> new ParameterizedMessage(
+                        "Failed to close stream while uploading a part of idx {} and file {}.",
+                        uploadPartRequest.partNumber(),
+                        uploadPartRequest.key()
+                    ),
+                    ex
+                );
+            }
+        })
+            .thenApply(
+                uploadPartResponse -> convertUploadPartResponse(
+                    completedParts,
+                    inputStreamContainers,
+                    uploadPartResponse,
+                    partNumber,
+                    uploadRequest.doRemoteDataIntegrityCheck()
+                )
+            );
         futures.add(convertFuture);
 
         CompletableFutureUtils.forwardExceptionTo(convertFuture, uploadPartResponseFuture);
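The buffered wrapper is what makes a part retryable from memory: with a buffer at least as large as the part, the stream can be rewound after a transient IO error and only the buffered bytes are replayed, not the whole file. A minimal standalone sketch of that mark/reset behavior (sizes are illustrative):

```java
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class MarkResetSketch {
    public static void main(String[] args) throws IOException {
        byte[] part = new byte[1024]; // pretend this is one upload part
        InputStream in = new BufferedInputStream(new ByteArrayInputStream(part), part.length + 1);
        in.mark(part.length + 1);     // remember the part boundary
        in.readNBytes(512);           // simulated partial send that then fails
        in.reset();                   // retry: rewind to the part boundary
        System.out.println(in.readAllBytes().length); // 1024 -> the full part is replayed
    }
}
```

This is also why the buffer is sized to one megabyte plus one byte: it must cover the largest chunk handed to `AsyncRequestBody.fromInputStream` for the reset to succeed.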
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java
index 8d45c2167a3d1..4f1ab9764702e 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/AsyncTransferManager.java
@@ -35,9 +35,12 @@
 import org.opensearch.common.util.ByteUtils;
 import org.opensearch.core.common.unit.ByteSizeUnit;
 import org.opensearch.repositories.s3.SocketAccess;
+import org.opensearch.repositories.s3.StatsMetricPublisher;
 import org.opensearch.repositories.s3.io.CheckedContainer;
 
+import java.io.BufferedInputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.util.Arrays;
 import java.util.Base64;
 import java.util.List;
@@ -58,6 +61,7 @@ public final class AsyncTransferManager {
     private static final Logger log = LogManager.getLogger(AsyncTransferManager.class);
     private final ExecutorService executorService;
     private final ExecutorService priorityExecutorService;
+    private final ExecutorService urgentExecutorService;
     private final long minimumPartSize;
 
     /**
@@ -72,10 +76,16 @@ public final class AsyncTransferManager {
      * @param executorService The stream reader {@link ExecutorService} for normal priority uploads
      * @param priorityExecutorService The stream read {@link ExecutorService} for high priority uploads
      */
-    public AsyncTransferManager(long minimumPartSize, ExecutorService executorService, ExecutorService priorityExecutorService) {
+    public AsyncTransferManager(
+        long minimumPartSize,
+        ExecutorService executorService,
+        ExecutorService priorityExecutorService,
+        ExecutorService urgentExecutorService
+    ) {
         this.executorService = executorService;
         this.priorityExecutorService = priorityExecutorService;
         this.minimumPartSize = minimumPartSize;
+        this.urgentExecutorService = urgentExecutorService;
     }
 
     /**
@@ -86,16 +96,21 @@ public AsyncTransferManager(long minimumPartSize, ExecutorService executorServic
      * @param streamContext The {@link StreamContext} to supply streams during upload
      * @return A {@link CompletableFuture} to listen for upload completion
      */
-    public CompletableFuture<Void> uploadObject(S3AsyncClient s3AsyncClient, UploadRequest uploadRequest, StreamContext streamContext) {
+    public CompletableFuture<Void> uploadObject(
+        S3AsyncClient s3AsyncClient,
+        UploadRequest uploadRequest,
+        StreamContext streamContext,
+        StatsMetricPublisher statsMetricPublisher
+    ) {
         CompletableFuture<Void> returnFuture = new CompletableFuture<>();
         try {
             if (streamContext.getNumberOfParts() == 1) {
                 log.debug(() -> "Starting the upload as a single upload part request");
-                uploadInOneChunk(s3AsyncClient, uploadRequest, streamContext.provideStream(0), returnFuture);
+                uploadInOneChunk(s3AsyncClient, uploadRequest, streamContext.provideStream(0), returnFuture, statsMetricPublisher);
             } else {
                 log.debug(() -> "Starting the upload as multipart upload request");
-                uploadInParts(s3AsyncClient, uploadRequest, streamContext, returnFuture);
+                uploadInParts(s3AsyncClient, uploadRequest, streamContext, returnFuture, statsMetricPublisher);
             }
         } catch (Throwable throwable) {
             returnFuture.completeExceptionally(throwable);
@@ -108,12 +123,14 @@ private void uploadInParts(
         S3AsyncClient s3AsyncClient,
         UploadRequest uploadRequest,
         StreamContext streamContext,
-        CompletableFuture<Void> returnFuture
+        CompletableFuture<Void> returnFuture,
+        StatsMetricPublisher statsMetricPublisher
     ) {
         CreateMultipartUploadRequest.Builder createMultipartUploadRequestBuilder = CreateMultipartUploadRequest.builder()
             .bucket(uploadRequest.getBucket())
-            .key(uploadRequest.getKey());
+            .key(uploadRequest.getKey())
+            .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.multipartUploadMetricCollector));
         if (uploadRequest.doRemoteDataIntegrityCheck()) {
             createMultipartUploadRequestBuilder.checksumAlgorithm(ChecksumAlgorithm.CRC32);
         }
@@ -152,6 +169,7 @@ private void doUploadInParts(
             s3AsyncClient,
             executorService,
             priorityExecutorService,
+            urgentExecutorService,
             uploadRequest,
             streamContext,
             uploadId,
@@ -286,28 +304,42 @@ private void uploadInOneChunk(
         S3AsyncClient s3AsyncClient,
         UploadRequest uploadRequest,
         InputStreamContainer inputStreamContainer,
-        CompletableFuture<Void> returnFuture
+        CompletableFuture<Void> returnFuture,
+        StatsMetricPublisher statsMetricPublisher
     ) {
         PutObjectRequest.Builder putObjectRequestBuilder = PutObjectRequest.builder()
             .bucket(uploadRequest.getBucket())
             .key(uploadRequest.getKey())
-            .contentLength(uploadRequest.getContentLength());
+            .contentLength(uploadRequest.getContentLength())
+            .overrideConfiguration(o -> o.addMetricPublisher(statsMetricPublisher.putObjectMetricPublisher));
         if (uploadRequest.doRemoteDataIntegrityCheck()) {
             putObjectRequestBuilder.checksumAlgorithm(ChecksumAlgorithm.CRC32);
             putObjectRequestBuilder.checksumCRC32(base64StringFromLong(uploadRequest.getExpectedChecksum()));
         }
-        ExecutorService streamReadExecutor = uploadRequest.getWritePriority() == WritePriority.HIGH
-            ? priorityExecutorService
-            : executorService;
+        ExecutorService streamReadExecutor;
+        if (uploadRequest.getWritePriority() == WritePriority.URGENT) {
+            streamReadExecutor = urgentExecutorService;
+        } else if (uploadRequest.getWritePriority() == WritePriority.HIGH) {
+            streamReadExecutor = priorityExecutorService;
+        } else {
+            streamReadExecutor = executorService;
+        }
+
+        // Buffered stream is needed to allow mark and reset ops during IO errors so that only buffered
+        // data can be retried instead of retrying whole file by the application.
+        InputStream inputStream = new BufferedInputStream(inputStreamContainer.getInputStream(), (int) (ByteSizeUnit.MB.toBytes(1) + 1));
         CompletableFuture<Void> putObjectFuture = SocketAccess.doPrivileged(
             () -> s3AsyncClient.putObject(
                 putObjectRequestBuilder.build(),
-                AsyncRequestBody.fromInputStream(
-                    inputStreamContainer.getInputStream(),
-                    inputStreamContainer.getContentLength(),
-                    streamReadExecutor
-                )
+                AsyncRequestBody.fromInputStream(inputStream, inputStreamContainer.getContentLength(), streamReadExecutor)
             ).handle((resp, throwable) -> {
+                try {
+                    inputStream.close();
+                } catch (IOException e) {
+                    log.error(
+                        () -> new ParameterizedMessage("Failed to close stream while uploading single file {}.", uploadRequest.getKey()),
+                        e
+                    );
+                }
                 if (throwable != null) {
                     Throwable unwrappedThrowable = ExceptionsHelper.unwrap(throwable, S3Exception.class);
                     if (unwrappedThrowable != null) {
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/utils/HttpRangeUtils.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/utils/HttpRangeUtils.java
index 97b9829124d0d..2e2fc9b86a45b 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/utils/HttpRangeUtils.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/utils/HttpRangeUtils.java
@@ -10,23 +10,34 @@
 
 import software.amazon.awssdk.core.exception.SdkException;
 
-import org.opensearch.common.collect.Tuple;
-
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-public class HttpRangeUtils {
-
-    private static final Pattern RANGE_PATTERN = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$");
-
-    public static Tuple<Long, Long> fromHttpRangeHeader(String headerValue) {
+public final class HttpRangeUtils {
+    private static final Pattern RANGE_PATTERN = Pattern.compile("^bytes\\s+(\\d+)-\\d+[/\\d*]+$");
+
+    /**
+     * Parses the content range header string value to calculate the start (offset) of the HTTP response.
+     * Tests against the RFC9110 specification of content range string.
+     * Sample values: "bytes 0-10/200", "bytes 0-10/*"
+     * Details here: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-range
+     * @param headerValue Header content range string value from the HTTP response
+     * @return Start (Offset) value of the HTTP response
+     */
+    public static Long getStartOffsetFromRangeHeader(String headerValue) {
         Matcher matcher = RANGE_PATTERN.matcher(headerValue);
         if (!matcher.find()) {
             throw SdkException.create("Regex match for Content-Range header {" + headerValue + "} failed", new RuntimeException());
         }
-        return new Tuple<>(Long.parseLong(matcher.group(1)), Long.parseLong(matcher.group(2)));
+        return Long.parseLong(matcher.group(1));
     }
 
+    /**
+     * Provides a byte range string per RFC 9110
+     * @param start start position (inclusive)
+     * @param end end position (inclusive)
+     * @return A 'bytes=start-end' string
+     */
     public static String toHttpRangeHeader(long start, long end) {
         return "bytes=" + start + "-" + end;
     }
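A quick illustration of the asymmetry between the two helpers: `Content-Range` *response* values look like "bytes 0-10/200", while *request* ranges from `toHttpRangeHeader` look like "bytes=0-10" and must not match the response pattern. A standalone sketch using the same regex (the wrapper class is hypothetical):

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class RangeSketch {
    private static final Pattern RANGE_PATTERN = Pattern.compile("^bytes\\s+(\\d+)-\\d+[/\\d*]+$");

    public static void main(String[] args) {
        for (String header : new String[] { "bytes 0-10/200", "bytes 10-20/*", "bytes=0-10" }) {
            Matcher m = RANGE_PATTERN.matcher(header);
            System.out.println(header + " -> " + (m.find() ? "start offset " + m.group(1) : "no match"));
        }
        System.out.println("request range: " + ("bytes=" + 0 + "-" + 10)); // what toHttpRangeHeader(0, 10) builds
    }
}
```

The "bytes 0-10/*" case covers responses where the total size is unknown, which the old `bytes=([0-9]+)-([0-9]+)` pattern could not parse.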
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java
index a4bfe11383b4f..8e1926d40302f 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java
@@ -302,7 +302,7 @@ protected S3Repository createRepository(
         ClusterService clusterService,
         RecoverySettings recoverySettings
     ) {
-        return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, false) {
+        return new S3Repository(metadata, registry, service, clusterService, recoverySettings, null, null, null, null, null, false) {
             @Override
             protected void assertSnapshotOrGenericThread() {
                 // eliminate thread name check as we create repo manually on test/main threads
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java
index e9fe557ab751a..de9ad46bb222d 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3AsyncServiceTests.java
@@ -44,12 +44,12 @@ public void testCachedClientsAreReleased() {
         final S3ClientSettings otherClientSettings = s3AsyncService.settings(metadata2);
         assertSame(clientSettings, otherClientSettings);
         final AmazonAsyncS3Reference reference = SocketAccess.doPrivileged(
-            () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer)
+            () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer)
         );
         reference.close();
         s3AsyncService.close();
         final AmazonAsyncS3Reference referenceReloaded = SocketAccess.doPrivileged(
-            () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer)
+            () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer)
         );
         assertNotSame(referenceReloaded, reference);
         referenceReloaded.close();
@@ -79,12 +79,12 @@ public void testCachedClientsWithCredentialsAreReleased() {
         final S3ClientSettings otherClientSettings = s3AsyncService.settings(metadata2);
         assertSame(clientSettings, otherClientSettings);
         final AmazonAsyncS3Reference reference = SocketAccess.doPrivileged(
-            () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer)
+            () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer)
         );
         reference.close();
         s3AsyncService.close();
         final AmazonAsyncS3Reference referenceReloaded = SocketAccess.doPrivileged(
-            () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer)
+            () -> s3AsyncService.client(metadata1, asyncExecutorContainer, asyncExecutorContainer, asyncExecutorContainer)
         );
         assertNotSame(referenceReloaded, reference);
         referenceReloaded.close();
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java
index 8c8524212e08e..7c67519f2f3b0 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java
@@ -64,6 +64,7 @@
 
 import org.mockito.invocation.InvocationOnMock;
 
+import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
@@ -265,10 +266,11 @@ public void verifySingleChunkUploadCallCount(boolean finalizeUploadFailure) {
         @Override
         public AmazonAsyncS3Reference client(
             RepositoryMetadata repositoryMetadata,
+            AsyncExecutorContainer urgentExecutorBuilder,
             AsyncExecutorContainer priorityExecutorBuilder,
             AsyncExecutorContainer normalExecutorBuilder
         ) {
-            return new AmazonAsyncS3Reference(AmazonAsyncS3WithCredentials.create(asyncClient, asyncClient, null));
+            return new AmazonAsyncS3Reference(AmazonAsyncS3WithCredentials.create(asyncClient, asyncClient, asyncClient, null));
         }
     }
 
@@ -387,13 +389,16 @@ private S3BlobStore createBlobStore() {
             S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY),
             S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY),
             S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY),
+            BULK_DELETE_SIZE.get(Settings.EMPTY),
             repositoryMetadata,
             new AsyncTransferManager(
                 S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(),
                 asyncExecutorContainer.getStreamReader(),
+                asyncExecutorContainer.getStreamReader(),
                 asyncExecutorContainer.getStreamReader()
             ),
             asyncExecutorContainer,
+            asyncExecutorContainer,
             asyncExecutorContainer
         );
     }
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java
index c9486c8bbfd78..ceab06bd051e9 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java
@@ -41,9 +41,9 @@
 import org.opensearch.common.Nullable;
 import org.opensearch.common.StreamContext;
 import org.opensearch.common.SuppressForbidden;
+import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer;
 import org.opensearch.common.blobstore.BlobContainer;
 import org.opensearch.common.blobstore.BlobPath;
-import org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer;
 import org.opensearch.common.blobstore.stream.write.StreamContextSupplier;
 import org.opensearch.common.blobstore.stream.write.WriteContext;
 import org.opensearch.common.blobstore.stream.write.WritePriority;
@@ -95,6 +95,7 @@
 import static org.opensearch.repositories.s3.S3ClientSettings.MAX_RETRIES_SETTING;
 import static org.opensearch.repositories.s3.S3ClientSettings.READ_TIMEOUT_SETTING;
 import static org.opensearch.repositories.s3.S3ClientSettings.REGION;
+import static org.opensearch.repositories.s3.S3Repository.BULK_DELETE_SIZE;
 import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
@@ -161,7 +162,7 @@ protected Class unresponsiveExceptionType() {
     }
 
     @Override
-    protected VerifyingMultiStreamBlobContainer createBlobContainer(
+    protected AsyncMultiStreamBlobContainer createBlobContainer(
         final @Nullable Integer maxRetries,
         final @Nullable TimeValue readTimeout,
         final @Nullable Boolean disableChunkedEncoding,
@@ -215,13 +216,16 @@ protected VerifyingMultiStreamBlobContainer createBlobContainer(
             bufferSize == null ? S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY) : bufferSize,
             S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY),
             S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY),
+            BULK_DELETE_SIZE.get(Settings.EMPTY),
             repositoryMetadata,
             new AsyncTransferManager(
                 S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(),
                 asyncExecutorContainer.getStreamReader(),
+                asyncExecutorContainer.getStreamReader(),
                 asyncExecutorContainer.getStreamReader()
             ),
             asyncExecutorContainer,
+            asyncExecutorContainer,
             asyncExecutorContainer
         )
@@ -320,7 +324,7 @@ public void testWriteBlobByStreamsWithRetries() throws Exception {
             }
         });
 
-        final VerifyingMultiStreamBlobContainer blobContainer = createBlobContainer(maxRetries, null, true, null);
+        final AsyncMultiStreamBlobContainer blobContainer = createBlobContainer(maxRetries, null, true, null);
         List<InputStream> openInputStreams = new ArrayList<>();
         CountDownLatch countDownLatch = new CountDownLatch(1);
         AtomicReference<Exception> exceptionRef = new AtomicReference<>();
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java
index 2438acaf7c1f2..58ad290a31e85 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java
@@ -32,11 +32,15 @@
 
 package org.opensearch.repositories.s3;
 
+import software.amazon.awssdk.core.ResponseInputStream;
+import software.amazon.awssdk.core.async.AsyncResponseTransformer;
 import software.amazon.awssdk.core.exception.SdkException;
 import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.services.s3.S3AsyncClient;
 import software.amazon.awssdk.services.s3.S3Client;
 import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest;
 import software.amazon.awssdk.services.s3.model.AbortMultipartUploadResponse;
+import software.amazon.awssdk.services.s3.model.Checksum;
 import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;
 import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadResponse;
 import software.amazon.awssdk.services.s3.model.CompletedPart;
@@ -44,6 +48,11 @@
 import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse;
 import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
 import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse;
+import software.amazon.awssdk.services.s3.model.GetObjectAttributesParts;
+import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse;
+import software.amazon.awssdk.services.s3.model.GetObjectRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectResponse;
 import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
 import software.amazon.awssdk.services.s3.model.HeadObjectResponse;
 import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
@@ -61,12 +70,15 @@
 import software.amazon.awssdk.services.s3.model.UploadPartResponse;
 import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable;
 
+import org.opensearch.action.LatchedActionListener;
 import org.opensearch.common.blobstore.BlobContainer;
 import org.opensearch.common.blobstore.BlobMetadata;
 import org.opensearch.common.blobstore.BlobPath;
 import org.opensearch.common.blobstore.BlobStoreException;
 import org.opensearch.common.blobstore.DeleteResult;
+import org.opensearch.common.blobstore.stream.read.ReadContext;
 import org.opensearch.common.collect.Tuple;
+import org.opensearch.common.io.InputStreamContainer;
 import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.common.unit.ByteSizeUnit;
 import org.opensearch.test.OpenSearchTestCase;
@@ -85,14 +97,18 @@
 import java.util.NoSuchElementException;
 import java.util.Set;
 import java.util.UUID;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
 import org.mockito.ArgumentCaptor;
+import org.mockito.ArgumentMatchers;
 
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
+import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
@@ -260,10 +276,12 @@ public void testDelete() throws IOException {
         final String bucketName = randomAlphaOfLengthBetween(1, 10);
 
         final BlobPath blobPath = new BlobPath();
+        int bulkDeleteSize = 5;
 
         final S3BlobStore blobStore = mock(S3BlobStore.class);
         when(blobStore.bucket()).thenReturn(bucketName);
         when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize);
 
         final S3Client client = mock(S3Client.class);
         doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference();
@@ -281,8 +299,11 @@ public void testDelete() throws IOException {
         when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable);
 
         final List<String> keysDeleted = new ArrayList<>();
+        AtomicInteger deleteCount = new AtomicInteger();
         doAnswer(invocation -> {
             DeleteObjectsRequest deleteObjectsRequest = invocation.getArgument(0);
+            deleteCount.getAndIncrement();
+            logger.info("Object sizes are {}", deleteObjectsRequest.delete().objects().size());
             keysDeleted.addAll(deleteObjectsRequest.delete().objects().stream().map(ObjectIdentifier::key).collect(Collectors.toList()));
             return DeleteObjectsResponse.builder().build();
         }).when(client).deleteObjects(any(DeleteObjectsRequest.class));
@@ -295,6 +316,8 @@ public void testDelete() throws IOException {
 
         // keysDeleted will have blobPath also
         assertEquals(listObjectsV2ResponseIterator.getKeysListed().size(), keysDeleted.size() - 1);
         assertTrue(keysDeleted.contains(blobPath.buildAsString()));
+        // expected number of bulk delete requests is a ceiling division over the batch size
+        assertEquals((int) Math.ceil(((double) keysDeleted.size() + 1) / bulkDeleteSize), deleteCount.get());
         keysDeleted.remove(blobPath.buildAsString());
         assertEquals(new HashSet<>(listObjectsV2ResponseIterator.getKeysListed()), new HashSet<>(keysDeleted));
     }
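The new assertion is just batching arithmetic: N keys split into bulk-delete requests of at most `bulkDeleteSize` keys each. A small sketch of the expected request count (the helper class is illustrative):

```java
// Hypothetical check of the bulk-delete batching asserted above.
public class BulkDeleteBatches {
    static int expectedBatches(int totalKeys, int bulkDeleteSize) {
        return (int) Math.ceil((double) totalKeys / bulkDeleteSize); // ceiling division
    }

    public static void main(String[] args) {
        // e.g. 11 keys (10 listed objects + the container path marker) in batches of 5 -> 3 requests
        System.out.println(expectedBatches(11, 5)); // prints 3
    }
}
```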
@@ -900,4 +923,418 @@ public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanPageSiz
     public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanNumberOfRecords() throws IOException {
         testListBlobsByPrefixInLexicographicOrder(12, 2, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC);
     }
+
+    public void testReadBlobAsyncMultiPart() throws Exception {
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final String blobName = randomAlphaOfLengthBetween(1, 10);
+        final String checksum = randomAlphaOfLength(10);
+
+        final long objectSize = 100L;
+        final int objectPartCount = 10;
+        final int partSize = 10;
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference(
+            AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null)
+        );
+
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        final BlobPath blobPath = new BlobPath();
+
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        when(blobStore.serverSideEncryption()).thenReturn(false);
+        when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference);
+
+        CompletableFuture<GetObjectAttributesResponse> getObjectAttributesResponseCompletableFuture = new CompletableFuture<>();
+        getObjectAttributesResponseCompletableFuture.complete(
+            GetObjectAttributesResponse.builder()
+                .checksum(Checksum.builder().checksumCRC32(checksum).build())
+                .objectSize(objectSize)
+                .objectParts(GetObjectAttributesParts.builder().totalPartsCount(objectPartCount).build())
+                .build()
+        );
+        when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenReturn(
+            getObjectAttributesResponseCompletableFuture
+        );
+
+        mockObjectPartResponse(s3AsyncClient, bucketName, blobName, objectPartCount, partSize, objectSize);
+
+        CountDownLatch countDownLatch = new CountDownLatch(1);
+        CountingCompletionListener<ReadContext> readContextActionListener = new CountingCompletionListener<>();
+        LatchedActionListener<ReadContext> listener = new LatchedActionListener<>(readContextActionListener, countDownLatch);
+
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+        blobContainer.readBlobAsync(blobName, listener);
+        countDownLatch.await();
+
+        assertEquals(1, readContextActionListener.getResponseCount());
+        assertEquals(0, readContextActionListener.getFailureCount());
+        ReadContext readContext = readContextActionListener.getResponse();
+        assertEquals(objectPartCount, readContext.getNumberOfParts());
+        assertEquals(checksum, readContext.getBlobChecksum());
+        assertEquals(objectSize, readContext.getBlobSize());
+
+        for (int partNumber = 1; partNumber < objectPartCount; partNumber++) {
+            InputStreamContainer inputStreamContainer = readContext.getPartStreams().get(partNumber).get().join();
+            final int offset = partNumber * partSize;
+            assertEquals(partSize, inputStreamContainer.getContentLength());
+            assertEquals(offset, inputStreamContainer.getOffset());
+            assertEquals(partSize, inputStreamContainer.getInputStream().readAllBytes().length);
+        }
+    }
+
+    public void testReadBlobAsyncSinglePart() throws Exception {
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final String blobName = randomAlphaOfLengthBetween(1, 10);
+        final String checksum = randomAlphaOfLength(10);
+
+        final int objectSize = 100;
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference(
+            AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null)
+        );
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        final BlobPath blobPath = new BlobPath();
+
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        when(blobStore.serverSideEncryption()).thenReturn(false);
+        when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference);
+
+        CompletableFuture<GetObjectAttributesResponse> getObjectAttributesResponseCompletableFuture = new CompletableFuture<>();
+        getObjectAttributesResponseCompletableFuture.complete(
+            GetObjectAttributesResponse.builder()
+                .checksum(Checksum.builder().checksumCRC32(checksum).build())
+                .objectSize((long) objectSize)
+                .build()
+        );
+        when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenReturn(
+            getObjectAttributesResponseCompletableFuture
+        );
+
+        mockObjectResponse(s3AsyncClient, bucketName, blobName, objectSize);
+
+        CountDownLatch countDownLatch = new CountDownLatch(1);
+        CountingCompletionListener<ReadContext> readContextActionListener = new CountingCompletionListener<>();
+        LatchedActionListener<ReadContext> listener = new LatchedActionListener<>(readContextActionListener, countDownLatch);
+
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+        blobContainer.readBlobAsync(blobName, listener);
+        countDownLatch.await();
+
+        assertEquals(1, readContextActionListener.getResponseCount());
+        assertEquals(0, readContextActionListener.getFailureCount());
+        ReadContext readContext = readContextActionListener.getResponse();
+        assertEquals(1, readContext.getNumberOfParts());
+        assertEquals(checksum, readContext.getBlobChecksum());
+        assertEquals(objectSize, readContext.getBlobSize());
+
+        InputStreamContainer inputStreamContainer = readContext.getPartStreams().stream().findFirst().get().get().join();
+        assertEquals(objectSize, inputStreamContainer.getContentLength());
+        assertEquals(0, inputStreamContainer.getOffset());
+        assertEquals(objectSize, inputStreamContainer.getInputStream().readAllBytes().length);
+
+    }
+
+    public void testReadBlobAsyncFailure() throws Exception {
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final String blobName = randomAlphaOfLengthBetween(1, 10);
+        final String checksum = randomAlphaOfLength(10);
+
+        final long objectSize = 100L;
+        final int objectPartCount = 10;
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference(
+            AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null)
+        );
+
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        final BlobPath blobPath = new BlobPath();
+
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        when(blobStore.serverSideEncryption()).thenReturn(false);
+        when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference);
+
+        CompletableFuture<GetObjectAttributesResponse> getObjectAttributesResponseCompletableFuture = new CompletableFuture<>();
+        getObjectAttributesResponseCompletableFuture.complete(
+            GetObjectAttributesResponse.builder()
+                .checksum(Checksum.builder().checksumCRC32(checksum).build())
+                .objectSize(objectSize)
+                .objectParts(GetObjectAttributesParts.builder().totalPartsCount(objectPartCount).build())
+                .build()
+        );
+        when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenThrow(new RuntimeException());
+
+        CountDownLatch countDownLatch = new CountDownLatch(1);
+        CountingCompletionListener<ReadContext> readContextActionListener = new CountingCompletionListener<>();
+        LatchedActionListener<ReadContext> listener = new LatchedActionListener<>(readContextActionListener, countDownLatch);
+
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+        blobContainer.readBlobAsync(blobName, listener);
+        countDownLatch.await();
+
+        assertEquals(0, readContextActionListener.getResponseCount());
+        assertEquals(1, readContextActionListener.getFailureCount());
+    }
+
+    public void testReadBlobAsyncOnCompleteFailureMissingData() throws Exception {
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final String blobName = randomAlphaOfLengthBetween(1, 10);
+        final String checksum = randomAlphaOfLength(10);
+
+        final long objectSize = 100L;
+        final int objectPartCount = 10;
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final AmazonAsyncS3Reference amazonAsyncS3Reference = new AmazonAsyncS3Reference(
+            AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null)
+        );
+
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        final BlobPath blobPath = new BlobPath();
+
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        when(blobStore.serverSideEncryption()).thenReturn(false);
+        when(blobStore.asyncClientReference()).thenReturn(amazonAsyncS3Reference);
+
+        CompletableFuture<GetObjectAttributesResponse> getObjectAttributesResponseCompletableFuture = new CompletableFuture<>();
+        getObjectAttributesResponseCompletableFuture.complete(
+            GetObjectAttributesResponse.builder()
+                .checksum(Checksum.builder().build())
+                .objectSize(null)
+                .objectParts(GetObjectAttributesParts.builder().totalPartsCount(objectPartCount).build())
+                .build()
+        );
+        when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenReturn(
+            getObjectAttributesResponseCompletableFuture
+        );
+
+        CountDownLatch countDownLatch = new CountDownLatch(1);
+        CountingCompletionListener<ReadContext> readContextActionListener = new CountingCompletionListener<>();
+        LatchedActionListener<ReadContext> listener = new LatchedActionListener<>(readContextActionListener, countDownLatch);
+
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+        blobContainer.readBlobAsync(blobName, listener);
+        countDownLatch.await();
+
+        assertEquals(0, readContextActionListener.getResponseCount());
+        assertEquals(1, readContextActionListener.getFailureCount());
+    }
+
+    public void testGetBlobMetadata() throws Exception {
+        final String checksum = randomAlphaOfLengthBetween(1, 10);
+        final long objectSize = 100L;
+        final int objectPartCount = 10;
+        final String blobName = randomAlphaOfLengthBetween(1, 10);
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        final BlobPath blobPath = new BlobPath();
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        when(blobStore.serverSideEncryption()).thenReturn(false);
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+
+        CompletableFuture<GetObjectAttributesResponse> getObjectAttributesResponseCompletableFuture = new CompletableFuture<>();
+        getObjectAttributesResponseCompletableFuture.complete(
+            GetObjectAttributesResponse.builder()
+                .checksum(Checksum.builder().checksumCRC32(checksum).build())
+                .objectSize(objectSize)
+                .objectParts(GetObjectAttributesParts.builder().totalPartsCount(objectPartCount).build())
+                .build()
+        );
+        when(s3AsyncClient.getObjectAttributes(any(GetObjectAttributesRequest.class))).thenReturn(
+            getObjectAttributesResponseCompletableFuture
+        );
+
+        CompletableFuture<GetObjectAttributesResponse> responseFuture = blobContainer.getBlobMetadata(s3AsyncClient, bucketName, blobName);
+        GetObjectAttributesResponse objectAttributesResponse = responseFuture.get();
+
+        assertEquals(checksum, objectAttributesResponse.checksum().checksumCRC32());
+        assertEquals(Long.valueOf(objectSize), objectAttributesResponse.objectSize());
+        assertEquals(Integer.valueOf(objectPartCount), objectAttributesResponse.objectParts().totalPartsCount());
+    }
+
+    public void testGetBlobPartInputStream() throws Exception {
+        final String blobName = randomAlphaOfLengthBetween(1, 10);
+        final String bucketName = randomAlphaOfLengthBetween(1, 10);
+        final long contentLength = 10L;
+        final String contentRange = "bytes 10-20/100";
+        final InputStream inputStream = ResponseInputStream.nullInputStream();
+
+        final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class);
+        final S3BlobStore blobStore = mock(S3BlobStore.class);
+        final BlobPath blobPath = new BlobPath();
+        when(blobStore.bucket()).thenReturn(bucketName);
+        when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher());
+        when(blobStore.serverSideEncryption()).thenReturn(false);
+        final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore);
+
+        GetObjectResponse getObjectResponse = GetObjectResponse.builder().contentLength(contentLength).contentRange(contentRange).build();
+
+        CompletableFuture<ResponseInputStream<GetObjectResponse>> getObjectPartResponse = new CompletableFuture<>();
+        ResponseInputStream<GetObjectResponse> responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream);
+        getObjectPartResponse.complete(responseInputStream);
+
+        when(
+            s3AsyncClient.getObject(
+                any(GetObjectRequest.class),
+                ArgumentMatchers.<AsyncResponseTransformer<GetObjectResponse, ResponseInputStream<GetObjectResponse>>>any()
+            )
+        ).thenReturn(getObjectPartResponse);
+
+        // Header based offset in case of a multi part object request
+        InputStreamContainer inputStreamContainer = blobContainer.getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, 0)
+            .get();
+
+        assertEquals(10, inputStreamContainer.getOffset());
+        assertEquals(contentLength, inputStreamContainer.getContentLength());
+        assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available());
+
+        // 0 offset in case of a single part object request
+        inputStreamContainer = blobContainer.getBlobPartInputStreamContainer(s3AsyncClient, bucketName, blobName, null).get();
+
+        assertEquals(0, inputStreamContainer.getOffset());
+        assertEquals(contentLength, inputStreamContainer.getContentLength());
+        assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available());
+    }
+
+    public void testTransformResponseToInputStreamContainer() throws Exception {
+        final String contentRange = "bytes 0-10/100";
+        final long contentLength = 10L;
+        final InputStream inputStream = ResponseInputStream.nullInputStream();
+
+        GetObjectResponse getObjectResponse = GetObjectResponse.builder().contentLength(contentLength).build();
+
+        // Exception when content range absent for multipart object
+        ResponseInputStream<GetObjectResponse> responseInputStreamNoRange = new ResponseInputStream<>(getObjectResponse, inputStream);
+        assertThrows(SdkException.class, () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoRange, true));
+
+        // No exception when content range absent for single part object
+        ResponseInputStream<GetObjectResponse> responseInputStreamNoRangeSinglePart = new ResponseInputStream<>(
+            getObjectResponse,
+            inputStream
+        );
+        InputStreamContainer inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer(
+            responseInputStreamNoRangeSinglePart,
+            false
+        );
+        assertEquals(contentLength, inputStreamContainer.getContentLength());
+        assertEquals(0, inputStreamContainer.getOffset());
+
+        // Exception when length is absent
+        getObjectResponse = GetObjectResponse.builder().contentRange(contentRange).build();
+        ResponseInputStream<GetObjectResponse> responseInputStreamNoContentLength = new ResponseInputStream<>(
+            getObjectResponse,
+            inputStream
+        );
+        assertThrows(
+            SdkException.class,
+            () -> S3BlobContainer.transformResponseToInputStreamContainer(responseInputStreamNoContentLength, true)
+        );
+
+        // No exception when range and length both are present
+        getObjectResponse = GetObjectResponse.builder().contentRange(contentRange).contentLength(contentLength).build();
+        ResponseInputStream<GetObjectResponse> responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream);
+        inputStreamContainer = S3BlobContainer.transformResponseToInputStreamContainer(responseInputStream, true);
+        assertEquals(contentLength, inputStreamContainer.getContentLength());
+        assertEquals(0, inputStreamContainer.getOffset());
+        assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available());
+    }
+
+    private void mockObjectResponse(S3AsyncClient s3AsyncClient, String bucketName, String blobName, int objectSize) {
+
+        final InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(objectSize));
+
+        GetObjectResponse getObjectResponse = GetObjectResponse.builder().contentLength((long) objectSize).build();
+
+        CompletableFuture<ResponseInputStream<GetObjectResponse>> getObjectPartResponse = new CompletableFuture<>();
+        ResponseInputStream<GetObjectResponse> responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream);
+        getObjectPartResponse.complete(responseInputStream);
+
+        GetObjectRequest getObjectRequest = GetObjectRequest.builder().bucket(bucketName).key(blobName).build();
+
+        when(
+            s3AsyncClient.getObject(
+                eq(getObjectRequest),
+                ArgumentMatchers.<AsyncResponseTransformer<GetObjectResponse, ResponseInputStream<GetObjectResponse>>>any()
+            )
+        ).thenReturn(getObjectPartResponse);
+
+    }
+
+    private void mockObjectPartResponse(
+        S3AsyncClient s3AsyncClient,
+        String bucketName,
+        String blobName,
+        int totalNumberOfParts,
+        int partSize,
+        long objectSize
+    ) {
+        for (int partNumber = 1; partNumber <= totalNumberOfParts; partNumber++) {
+            final int start = (partNumber - 1) * partSize;
+            final int end = partNumber * partSize;
+            final String contentRange = "bytes " + start + "-" + end + "/" + objectSize;
+            final InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(partSize));
+
+            GetObjectResponse getObjectResponse = GetObjectResponse.builder()
+                .contentLength((long) partSize)
+                .contentRange(contentRange)
+                .build();
+
+            CompletableFuture<ResponseInputStream<GetObjectResponse>> getObjectPartResponse = new CompletableFuture<>();
+            ResponseInputStream<GetObjectResponse> responseInputStream = new ResponseInputStream<>(getObjectResponse, inputStream);
+            getObjectPartResponse.complete(responseInputStream);
+
+            GetObjectRequest getObjectRequest = GetObjectRequest.builder().bucket(bucketName).key(blobName).partNumber(partNumber).build();
+
+            when(
+                s3AsyncClient.getObject(
+                    eq(getObjectRequest),
+                    ArgumentMatchers.<AsyncResponseTransformer<GetObjectResponse, ResponseInputStream<GetObjectResponse>>>any()
+                )
+            ).thenReturn(getObjectPartResponse);
+        }
+    }
+
+    private static class CountingCompletionListener<T> implements ActionListener<T> {
+        private int responseCount;
+        private int failureCount;
+        private T response;
+        private Exception exception;
+
+        @Override
+        public void onResponse(T response) {
+            this.response = response;
+            responseCount++;
+        }
+
+        @Override
+        public void onFailure(Exception e) {
+            exception = e;
+            failureCount++;
+        }
+
+        public int getResponseCount() {
+            return responseCount;
+        }
+
+        public int getFailureCount() {
+            return failureCount;
+        }
+
+        public T getResponse() {
+            return response;
+        }
+
+        public Exception getException() {
+            return exception;
+        }
+    }
 }
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java
index 533c3aa17009d..6fec535ae6301 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java
@@ -36,17 +36,20 @@
 
 import org.opensearch.cluster.metadata.RepositoryMetadata;
 import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.common.unit.ByteSizeUnit;
 import org.opensearch.core.common.unit.ByteSizeValue;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.indices.recovery.RecoverySettings;
 import org.opensearch.repositories.RepositoryException;
+import org.opensearch.repositories.blobstore.BlobStoreRepository;
 import org.opensearch.repositories.blobstore.BlobStoreTestUtil;
 import org.opensearch.test.OpenSearchTestCase;
 import org.hamcrest.Matchers;
 
 import java.nio.file.Path;
+import java.util.List;
 import java.util.Map;
 
 import static org.hamcrest.Matchers.containsString;
@@ -122,7 +125,8 @@ public void testBasePathSetting() {
     }
 
     public void testDefaultBufferSize() {
-        final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY);
+        Settings settings = Settings.builder().build();
+        final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", settings);
         try (S3Repository s3repo = createS3Repo(metadata)) {
             assertThat(s3repo.getBlobStore(), is(nullValue()));
             s3repo.start();
@@ -133,6 +137,26 @@ public void testDefaultBufferSize() {
         }
     }
 
+    public void testIsReloadable() {
+        final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY);
+        try (S3Repository s3repo = createS3Repo(metadata)) {
+            assertTrue(s3repo.isReloadable());
+        }
+    }
+
+    public void testRestrictedSettingsDefault() {
+        final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY);
+        try (S3Repository s3repo = createS3Repo(metadata)) {
+            List<Setting<?>> restrictedSettings = s3repo.getRestrictedSystemRepositorySettings();
+            assertThat(restrictedSettings.size(), is(5));
+            assertTrue(restrictedSettings.contains(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING));
+            assertTrue(restrictedSettings.contains(BlobStoreRepository.READONLY_SETTING));
+ private static class CountingCompletionListener<T> implements ActionListener<T> { + private int responseCount; + private int failureCount; + private T response; + private Exception exception; + + @Override + public void onResponse(T response) { + this.response = response; + responseCount++; + } + + @Override + public void onFailure(Exception e) { + exception = e; + failureCount++; + } + + public int getResponseCount() { + return responseCount; + } + + public int getFailureCount() { + return failureCount; + } + + public T getResponse() { + return response; + } + + public Exception getException() { + return exception; + } + } } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java index 533c3aa17009d..6fec535ae6301 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryTests.java @@ -36,17 +36,20 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.repositories.RepositoryException; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.BlobStoreTestUtil; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.Matchers; import java.nio.file.Path; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.containsString; @@ -122,7 +125,8 @@ public void testBasePathSetting() { } public void testDefaultBufferSize() { - final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY); + Settings settings = Settings.builder().build(); + final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", settings); try (S3Repository s3repo = createS3Repo(metadata)) { assertThat(s3repo.getBlobStore(), is(nullValue())); s3repo.start(); @@ -133,6 +137,26 @@ public void testDefaultBufferSize() { } } + public void testIsReloadable() { + final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY); + try (S3Repository s3repo = createS3Repo(metadata)) { + assertTrue(s3repo.isReloadable()); + } + } + + public void testRestrictedSettingsDefault() { + final RepositoryMetadata metadata = new RepositoryMetadata("dummy-repo", "mock", Settings.EMPTY); + try (S3Repository s3repo = createS3Repo(metadata)) { + List<Setting<?>> restrictedSettings = s3repo.getRestrictedSystemRepositorySettings(); + assertThat(restrictedSettings.size(), is(5)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.READONLY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY)); + assertTrue(restrictedSettings.contains(S3Repository.BUCKET_SETTING)); + assertTrue(restrictedSettings.contains(S3Repository.BASE_PATH_SETTING)); + } + } + private S3Repository createS3Repo(RepositoryMetadata metadata) { return new S3Repository( metadata, @@ -144,6 +168,7 @@ private S3Repository createS3Repo(RepositoryMetadata metadata) { null, null, null, + null, false ) { @Override diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java index 8be1d72c95b15..b38d5119b4108 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RetryingInputStreamTests.java @@ -38,7 +38,6 @@ import software.amazon.awssdk.services.s3.model.GetObjectResponse; import org.opensearch.common.io.Streams; -import org.opensearch.repositories.s3.utils.HttpRangeUtils; import org.opensearch.test.OpenSearchTestCase; import java.io.ByteArrayInputStream; @@ -104,11 +103,11 @@ public void testRangeInputStreamIsAborted() throws IOException { } private S3RetryingInputStream createInputStream(final byte[] data, final Long start, final Long length) throws IOException { - long end = Math.addExact(start, length - 1); + final long end = Math.addExact(start, length - 1); final S3Client client = mock(S3Client.class); when(client.getObject(any(GetObjectRequest.class))).thenReturn( new ResponseInputStream<>( - GetObjectResponse.builder().contentLength(length).contentRange(HttpRangeUtils.toHttpRangeHeader(start, end)).build(), + GetObjectResponse.builder().contentLength(length).build(), new ByteArrayInputStream(data, Math.toIntExact(start), Math.toIntExact(length)) ) ); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java index 9c07b929052bc..2437547a80a6f 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/AsyncTransferManagerTests.java @@ -33,12 +33,18 @@ import org.opensearch.common.io.InputStreamContainer; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.repositories.blobstore.ZeroInputStream; +import org.opensearch.repositories.s3.StatsMetricPublisher; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicReference; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -58,6 +64,7 @@ public void setUp() throws Exception { asyncTransferManager = new AsyncTransferManager( ByteSizeUnit.MB.toBytes(5), Executors.newSingleThreadExecutor(), + Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor() ); super.setUp(); @@ -70,17 +77,17 @@ public void testOneChunkUpload() { putObjectResponseCompletableFuture ); + AtomicReference<InputStream> streamRef = new AtomicReference<>();
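+ // The stream factory below stashes the ZeroInputStream in streamRef so the test can assert that the upload path closed it.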
CompletableFuture<Void> resultFuture = asyncTransferManager.uploadObject( s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(1), WritePriority.HIGH, uploadSuccess -> { // do nothing }, false, null), - new StreamContext( - (partIdx, partSize, position) -> new InputStreamContainer(new ZeroInputStream(partSize), partSize, position), - ByteSizeUnit.MB.toBytes(1), - ByteSizeUnit.MB.toBytes(1), - 1 - ) + new StreamContext((partIdx, partSize, position) -> { + streamRef.set(new ZeroInputStream(partSize)); + return new InputStreamContainer(streamRef.get(), partSize, position); + }, ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 1), + new StatsMetricPublisher() ); try { @@ -90,6 +97,14 @@ } verify(s3AsyncClient, times(1)).putObject(any(PutObjectRequest.class), any(AsyncRequestBody.class)); + + boolean closeError = false; + try { + streamRef.get().available(); + } catch (IOException e) { + closeError = e.getMessage().equals("Stream closed"); + } + assertTrue("InputStream was still open after upload", closeError); } public void testOneChunkUploadCorruption() { @@ -118,7 +133,8 @@ ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 1 - ) + ), + new StatsMetricPublisher() ); try { @@ -159,17 +175,18 @@ public void testMultipartUpload() { abortMultipartUploadResponseCompletableFuture ); + List<InputStream> streams = new ArrayList<>(); CompletableFuture<Void> resultFuture = asyncTransferManager.uploadObject( s3AsyncClient, new UploadRequest("bucket", "key", ByteSizeUnit.MB.toBytes(5), WritePriority.HIGH, uploadSuccess -> { // do nothing }, true, 3376132981L), - new StreamContext( - (partIdx, partSize, position) -> new InputStreamContainer(new ZeroInputStream(partSize), partSize, position), - ByteSizeUnit.MB.toBytes(1), - ByteSizeUnit.MB.toBytes(1), - 5 - ) + new StreamContext((partIdx, partSize, position) -> { + InputStream stream = new ZeroInputStream(partSize); + streams.add(stream); + return new InputStreamContainer(stream, partSize, position); + }, ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 5), + new StatsMetricPublisher() ); try { @@ -178,6 +195,16 @@ fail("did not expect resultFuture to fail"); } + streams.forEach(stream -> { + boolean closeError = false; + try { + stream.available(); + } catch (IOException e) { + closeError = e.getMessage().equals("Stream closed"); + } + assertTrue("InputStream was still open after upload", closeError); + }); + verify(s3AsyncClient, times(1)).createMultipartUpload(any(CreateMultipartUploadRequest.class)); verify(s3AsyncClient, times(5)).uploadPart(any(UploadPartRequest.class), any(AsyncRequestBody.class)); verify(s3AsyncClient, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class)); @@ -219,7 +246,8 @@ public void testMultipartUploadCorruption() { ByteSizeUnit.MB.toBytes(1), ByteSizeUnit.MB.toBytes(1), 5 - ) + ), + new StatsMetricPublisher() ); try { diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/utils/HttpRangeUtilsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/utils/HttpRangeUtilsTests.java new file mode 100644 index 0000000000000..9a4267c5266e5 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/utils/HttpRangeUtilsTests.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + + package org.opensearch.repositories.s3.utils; + + import software.amazon.awssdk.core.exception.SdkException; + + import org.opensearch.test.OpenSearchTestCase; + + public final class HttpRangeUtilsTests extends OpenSearchTestCase { +
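+ + // A Content-Range value has the form "bytes <first>-<last>/<length>"; "*" as the length means the total size is + // unknown, while "bytes */*" carries no start offset, so parsing it is expected to fail with an SdkException.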
+ public void testFromHttpRangeHeader() { + String headerValue = "bytes 0-10/200"; + Long offset = HttpRangeUtils.getStartOffsetFromRangeHeader(headerValue); + assertEquals(0L, offset.longValue()); + + headerValue = "bytes 0-10/*"; + offset = HttpRangeUtils.getStartOffsetFromRangeHeader(headerValue); + assertEquals(0L, offset.longValue()); + + final String invalidHeaderValue = "bytes */*"; + assertThrows(SdkException.class, () -> HttpRangeUtils.getStartOffsetFromRangeHeader(invalidHeaderValue)); + } +} diff --git a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java index 2cac58262c75a..e1655cc5e0784 100644 --- a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java +++ b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbMMapDirectoryTests.java @@ -47,9 +47,9 @@ protected Directory getDirectory(Path file) throws IOException { @Override public void testCreateOutputForExistingFile() throws IOException { - /** - * This test is disabled because {@link SmbDirectoryWrapper} opens existing file - * with an explicit StandardOpenOption.TRUNCATE_EXISTING option. + /* + This test is disabled because {@link SmbDirectoryWrapper} opens existing file + with an explicit StandardOpenOption.TRUNCATE_EXISTING option. */ } } diff --git a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java index 7390759029dfc..6f821147c3079 100644 --- a/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java +++ b/plugins/store-smb/src/test/java/org/opensearch/index/store/SmbNIOFSDirectoryTests.java @@ -26,9 +26,9 @@ protected Directory getDirectory(Path file) throws IOException { @Override public void testCreateOutputForExistingFile() throws IOException { - /** - * This test is disabled because {@link SmbDirectoryWrapper} opens existing file - * with an explicit StandardOpenOption.TRUNCATE_EXISTING option. + /* + This test is disabled because {@link SmbDirectoryWrapper} opens existing file + with an explicit StandardOpenOption.TRUNCATE_EXISTING option.
*/ } } diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 04fa9df9a47d0..f5c367cb7643b 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -28,9 +28,16 @@ dependencies { api "io.opentelemetry:opentelemetry-sdk-trace:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-sdk-metrics:${versions.opentelemetry}" api "io.opentelemetry:opentelemetry-exporter-logging:${versions.opentelemetry}" - api "io.opentelemetry:opentelemetry-semconv:${versions.opentelemetry}-alpha" - api "io.opentelemetry:opentelemetry-sdk-logs:${versions.opentelemetry}-alpha" - api "io.opentelemetry:opentelemetry-api-logs:${versions.opentelemetry}-alpha" + api "io.opentelemetry.semconv:opentelemetry-semconv:${versions.opentelemetrysemconv}" + api "io.opentelemetry:opentelemetry-sdk-logs:${versions.opentelemetry}" + api "io.opentelemetry:opentelemetry-exporter-otlp:${versions.opentelemetry}" + api "io.opentelemetry:opentelemetry-exporter-common:${versions.opentelemetry}" + api "io.opentelemetry:opentelemetry-exporter-otlp-common:${versions.opentelemetry}" + runtimeOnly "org.jetbrains.kotlin:kotlin-stdlib:${versions.kotlin}" + runtimeOnly "com.squareup.okhttp3:okhttp:4.11.0" + runtimeOnly "com.squareup.okio:okio-jvm:3.5.0" + runtimeOnly "io.opentelemetry:opentelemetry-exporter-sender-okhttp:${versions.opentelemetry}" + api "io.opentelemetry:opentelemetry-extension-incubator:${versions.opentelemetry}-alpha" testImplementation "io.opentelemetry:opentelemetry-sdk-testing:${versions.opentelemetry}" } @@ -45,15 +52,40 @@ thirdPartyAudit { ) ignoreMissingClasses( + 'android.net.http.X509TrustManagerExtensions', + 'android.net.ssl.SSLSockets', + 'android.os.Build$VERSION', + 'android.security.NetworkSecurityPolicy', + 'android.util.Log', + 'com.google.common.io.ByteStreams', + 'com.google.common.util.concurrent.ListenableFuture', + 'io.grpc.CallOptions', + 'io.grpc.Channel', + 'io.grpc.Drainable', + 'io.grpc.KnownLength', + 'io.grpc.ManagedChannel', + 'io.grpc.MethodDescriptor', + 'io.grpc.MethodDescriptor$Builder', + 'io.grpc.MethodDescriptor$Marshaller', + 'io.grpc.MethodDescriptor$MethodType', + 'io.grpc.stub.AbstractFutureStub', + 'io.grpc.stub.AbstractStub', + 'io.grpc.stub.ClientCalls', + 'org.bouncycastle.jsse.BCSSLParameters', + 'org.bouncycastle.jsse.BCSSLSocket', + 'org.conscrypt.Conscrypt', + 'org.conscrypt.Conscrypt$Version', + 'org.conscrypt.ConscryptHostnameVerifier', + 'org.openjsse.javax.net.ssl.SSLParameters', + 'org.openjsse.javax.net.ssl.SSLSocket', 'io.opentelemetry.api.events.EventEmitter', 'io.opentelemetry.api.events.EventEmitterBuilder', 'io.opentelemetry.api.events.EventEmitterProvider', - 'io.opentelemetry.extension.incubator.metrics.ExtendedDoubleHistogramBuilder', - 'io.opentelemetry.extension.incubator.metrics.ExtendedLongHistogramBuilder', - 'io.opentelemetry.extension.incubator.metrics.HistogramAdviceConfigurer', 'io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties', 'io.opentelemetry.sdk.autoconfigure.spi.logs.ConfigurableLogRecordExporterProvider', 'io.opentelemetry.sdk.autoconfigure.spi.metrics.ConfigurableMetricExporterProvider', + 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', + 'kotlin.io.path.PathsKt', 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider' ) } diff --git a/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties b/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties index 544f42bd5513b..8dec1119eec66 100644 
--- a/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties +++ b/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties @@ -25,3 +25,23 @@ logger.exporter.name = io.opentelemetry.exporter.logging.LoggingSpanExporter logger.exporter.level = INFO logger.exporter.appenderRef.tracing.ref = tracing logger.exporter.additivity = false + + +appender.metrics.type = RollingFile +appender.metrics.name = metrics +appender.metrics.fileName = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_otel_metrics.log +appender.metrics.filePermissions = rw-r----- +appender.metrics.layout.type = PatternLayout +appender.metrics.layout.pattern = %m%n +appender.metrics.filePattern = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_otel_metrics-%i.log.gz +appender.metrics.policies.type = Policies +appender.metrics.policies.size.type = SizeBasedTriggeringPolicy +appender.metrics.policies.size.size = 1GB +appender.metrics.strategy.type = DefaultRolloverStrategy +appender.metrics.strategy.max = 4 + + +logger.metrics_exporter.name = io.opentelemetry.exporter.logging.LoggingMetricExporter +logger.metrics_exporter.level = INFO +logger.metrics_exporter.appenderRef.tracing.ref = metrics +logger.metrics_exporter.additivity = false diff --git a/plugins/telemetry-otel/licenses/kotlin-stdlib-1.7.10.jar.sha1 b/plugins/telemetry-otel/licenses/kotlin-stdlib-1.7.10.jar.sha1 new file mode 100644 index 0000000000000..4d119fbf4df70 --- /dev/null +++ b/plugins/telemetry-otel/licenses/kotlin-stdlib-1.7.10.jar.sha1 @@ -0,0 +1 @@ +d2abf9e77736acc4450dc4a3f707fa2c10f5099d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/kotlin-stdlib-LICENSE.txt b/plugins/telemetry-otel/licenses/kotlin-stdlib-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/kotlin-stdlib-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/kotlin-stdlib-NOTICE.txt b/plugins/telemetry-otel/licenses/kotlin-stdlib-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/okhttp-4.11.0.jar.sha1 b/plugins/telemetry-otel/licenses/okhttp-4.11.0.jar.sha1 new file mode 100644 index 0000000000000..1fc0db6615cb5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/okhttp-4.11.0.jar.sha1 @@ -0,0 +1 @@ +436932d695b2c43f2c86b8111c596179cd133d56 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/okhttp-LICENSE.txt b/plugins/telemetry-otel/licenses/okhttp-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/okhttp-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/telemetry-otel/licenses/okhttp-NOTICE.txt b/plugins/telemetry-otel/licenses/okhttp-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/okio-jvm-3.5.0.jar.sha1 b/plugins/telemetry-otel/licenses/okio-jvm-3.5.0.jar.sha1 new file mode 100644 index 0000000000000..7b19d32d872fa --- /dev/null +++ b/plugins/telemetry-otel/licenses/okio-jvm-3.5.0.jar.sha1 @@ -0,0 +1 @@ +d6a0bc7343210eff7dd5cfdd6eb9b5f0036638ce \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/okio-jvm-LICENSE.txt b/plugins/telemetry-otel/licenses/okio-jvm-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/okio-jvm-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/okio-jvm-NOTICE.txt b/plugins/telemetry-otel/licenses/okio-jvm-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.26.0.jar.sha1 deleted file mode 100644 index da3abcc8f70d2..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.26.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7ee1ccca95155e4640094ba8dfbd0bb8c1709c83 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..eae141a8d1a23 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.31.0.jar.sha1 @@ -0,0 +1 @@ +bb24a44d73484c681c236aed84fe6c28d17f30e2 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-logs-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-logs-1.26.0-alpha.jar.sha1 deleted file mode 100644 index 2c233d785dcb2..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-logs-1.26.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1b0b6c1a20da0f841634d4f736e331aa4871a4db \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.26.0.jar.sha1 deleted file mode 100644 index 01d9fd732249b..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.26.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -42991f523a7a10761213e2f11633c67c8beaed88 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..6e42973adc581 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.31.0.jar.sha1 @@ -0,0 +1 @@ +b8004737f7a970124e36ac71fde8eb88423e8cee \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..b119468e7f88b --- /dev/null +++ 
b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.31.0.jar.sha1 @@ -0,0 +1 @@ +b7b4baf5f9af72d5eb8a231dfb114ae31c57150d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.26.0.jar.sha1 deleted file mode 100644 index ef07e4cb81e34..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.26.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1b932170774da5e766440fa058d879f68fe2c5dd \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..8f653922d6418 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.31.0.jar.sha1 @@ -0,0 +1 @@ +260e5363dad83a0ae65c16ad6a3dd2914e0db201 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..103da4720de96 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.31.0.jar.sha1 @@ -0,0 +1 @@ +b6454464425dfd81519070caeca3824558a2f1ae \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..3db07532ceea9 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.31.0.jar.sha1 @@ -0,0 +1 @@ +d8c22b6851bbc3dbf5d2387b9bde158ed5416ba4 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..10d9b7cdfe3e3 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.31.0.jar.sha1 @@ -0,0 +1 @@ +dd209381d58cfe81a989e29c9ca26d97c8dabd7a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-LICENSE.txt new file mode 100644 index 0000000000000..6b0b1270ff0ca --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-LICENSE.txt @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..162890965a6eb --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.31.0-alpha.jar.sha1 @@ -0,0 +1 @@ +6c9f5c063309d92b6dd28bff0667f54b63afd36f \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.26.0.jar.sha1 deleted file mode 100644 index dc9946de3b160..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.26.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79a86f258ede8625627e8fbdff07d1149c88a8e6 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..d6ce31a31cc6f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.31.0.jar.sha1 @@ -0,0 +1 @@ +2b2093be08a09ac536292bf6cecf8129cc7fb191 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.26.0.jar.sha1 deleted file mode 100644 index 2bd3e60a1faf6..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.26.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b42359d2232f8d802d55153be5330b1d9e21ee15 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..8a6a9705d836d --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.31.0.jar.sha1 @@ -0,0 +1 @@ +f492528288236e097e12fc1c45963dd82c70d33c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.26.0-alpha.jar.sha1 deleted file mode 100644 index 90bb8202c4c9d..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.26.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a8abeaee240291cce9067f07569f151d11a6275a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 new file mode 100644 index 
0000000000000..37d79f5c573f7 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.31.0.jar.sha1 @@ -0,0 +1 @@ +a63a203d3dc6f8875f8c26b9e3b522dc9a3f6280 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.26.0.jar.sha1 deleted file mode 100644 index 62396a603423f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.26.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8c4af22d7d92a3a79714be3f79724b0ab774ba9e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..80179e4808f50 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.31.0.jar.sha1 @@ -0,0 +1 @@ +47cc23762fae728d68e4fda1dfb71986ae0b8b3e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.26.0.jar.sha1 deleted file mode 100644 index 0fcebee353105..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.26.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fcc5785b2cf2be897f31b927e24b53e46e377388 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 new file mode 100644 index 0000000000000..fd917a58ba77c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.31.0.jar.sha1 @@ -0,0 +1 @@ +a3941197cfb8ae9eb9e482073480c0c3918b746c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..77b12c99464f6 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.21.0-alpha.jar.sha1 @@ -0,0 +1 @@ +207660e74d1e155272e9559fd4d27854b92fc6ac \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 deleted file mode 100644 index 47c7ece8c9f6c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1f4f963673f8209208f868666cd43e79b9a2dd15 \ No newline at end of file diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/IntegrationTestOTelTelemetryPlugin.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/IntegrationTestOTelTelemetryPlugin.java similarity index 72% rename from plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/IntegrationTestOTelTelemetryPlugin.java rename to plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/IntegrationTestOTelTelemetryPlugin.java index 57dbf4e001be4..45caf8bf5f60b 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/IntegrationTestOTelTelemetryPlugin.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/IntegrationTestOTelTelemetryPlugin.java @@ -6,12 +6,9 @@ * compatible open source license. 
*/ -package org.opensearch.telemetry.tracing; +package org.opensearch.telemetry; import org.opensearch.common.settings.Settings; -import org.opensearch.telemetry.OTelTelemetryPlugin; -import org.opensearch.telemetry.Telemetry; -import org.opensearch.telemetry.TelemetrySettings; import java.util.Optional; @@ -32,10 +29,10 @@ public IntegrationTestOTelTelemetryPlugin(Settings settings) { /** * This method overrides getTelemetry() method in OTel plugin class, so we create only one instance of global OpenTelemetry * resetForTest() will set OpenTelemetry to null again. - * @param settings cluster settings + * @param telemetrySettings telemetry settings */ - public Optional getTelemetry(TelemetrySettings settings) { + public Optional getTelemetry(TelemetrySettings telemetrySettings) { GlobalOpenTelemetry.resetForTest(); - return super.getTelemetry(settings); + return super.getTelemetry(telemetrySettings); } } diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/InMemorySingletonMetricsExporter.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/InMemorySingletonMetricsExporter.java new file mode 100644 index 0000000000000..74fc872cb30e3 --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/InMemorySingletonMetricsExporter.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import java.util.Collection; +import java.util.List; + +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricExporter; + +public class InMemorySingletonMetricsExporter implements MetricExporter { + + public static final InMemorySingletonMetricsExporter INSTANCE = new InMemorySingletonMetricsExporter(InMemoryMetricExporter.create()); + + private static InMemoryMetricExporter delegate; + + public static InMemorySingletonMetricsExporter create() { + return INSTANCE; + } + + private InMemorySingletonMetricsExporter(InMemoryMetricExporter delegate) { + InMemorySingletonMetricsExporter.delegate = delegate; + } + + @Override + public CompletableResultCode export(Collection metrics) { + return delegate.export(metrics); + } + + @Override + public CompletableResultCode flush() { + return delegate.flush(); + } + + @Override + public CompletableResultCode shutdown() { + return delegate.shutdown(); + } + + public List getFinishedMetricItems() { + return delegate.getFinishedMetricItems(); + } + + /** + * Clears the state. 
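+ * Tests call this between runs: the exporter is a JVM-wide singleton, so finished metrics would otherwise leak across tests.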
+ */ + public void reset() { + delegate.reset(); + } + + @Override + public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { + return delegate.getAggregationTemporality(instrumentType); + } +} diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java new file mode 100644 index 0000000000000..bcdcb657c4f42 --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsDisabledSanityIT.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.noop.NoopCounter; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, minNumDataNodes = 1) +public class TelemetryMetricsDisabledSanityIT extends OpenSearchIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING.getKey(), false) + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.metrics.InMemorySingletonMetricsExporter" + ) + .put(TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(IntegrationTestOTelTelemetryPlugin.class); + } + + @Override + protected boolean addMockTelemetryPlugin() { + return false; + } + + public void testSanityChecksWhenMetricsDisabled() throws Exception { + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + + Counter counter = metricsRegistry.createCounter("test-counter", "test", "1"); + counter.add(1.0); + + Thread.sleep(2000); + + assertTrue(metricsRegistry instanceof NoopMetricsRegistry); + assertTrue(counter instanceof NoopCounter); + } + +} diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java new file mode 100644 index 0000000000000..ed341595d327d --- /dev/null +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/metrics/TelemetryMetricsEnabledSanityIT.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.stream.Collectors; + +import io.opentelemetry.sdk.metrics.data.DoublePointData; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, minNumDataNodes = 1) +public class TelemetryMetricsEnabledSanityIT extends OpenSearchIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING.getKey(), true) + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.metrics.InMemorySingletonMetricsExporter" + ) + .put(TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(IntegrationTestOTelTelemetryPlugin.class); + } + + @Override + protected boolean addMockTelemetryPlugin() { + return false; + } + + public void testCounter() throws Exception { + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + InMemorySingletonMetricsExporter.INSTANCE.reset(); + + Counter counter = metricsRegistry.createCounter("test-counter", "test", "1"); + counter.add(1.0); + // Sleep for about 2s to wait for metrics to be published. + Thread.sleep(2000); + + InMemorySingletonMetricsExporter exporter = InMemorySingletonMetricsExporter.INSTANCE; + double value = ((DoublePointData) ((ArrayList) exporter.getFinishedMetricItems() + .stream() + .filter(a -> a.getName().equals("test-counter")) + .collect(Collectors.toList()) + .get(0) + .getDoubleSumData() + .getPoints()).get(0)).getValue(); + assertEquals(1.0, value, 0.0); + } + + public void testUpDownCounter() throws Exception { + + MetricsRegistry metricsRegistry = internalCluster().getInstance(MetricsRegistry.class); + InMemorySingletonMetricsExporter.INSTANCE.reset(); + + Counter counter = metricsRegistry.createUpDownCounter("test-up-down-counter", "test", "1"); + counter.add(1.0); + counter.add(-2.0); + // Sleep for about 2s to wait for metrics to be published. 
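+ // (METRICS_PUBLISH_INTERVAL_SETTING is set to 1s in nodeSettings above, so 2s covers at least one full export cycle.)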
+ Thread.sleep(2000); + + InMemorySingletonMetricsExporter exporter = InMemorySingletonMetricsExporter.INSTANCE; + double value = ((DoublePointData) ((ArrayList) exporter.getFinishedMetricItems() + .stream() + .filter(a -> a.getName().equals("test-up-down-counter")) + .collect(Collectors.toList()) + .get(0) + .getDoubleSumData() + .getPoints()).get(0)).getValue(); + assertEquals(-1.0, value, 0.0); + } + + @After + public void reset() { + InMemorySingletonMetricsExporter.INSTANCE.reset(); + } +} diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/InMemorySingletonSpanExporter.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/InMemorySingletonSpanExporter.java index 5fb6acc8346db..6dd451ea37465 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/InMemorySingletonSpanExporter.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/InMemorySingletonSpanExporter.java @@ -11,7 +11,9 @@ import org.opensearch.test.telemetry.tracing.MockSpanData; import java.util.Collection; +import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import io.opentelemetry.sdk.common.CompletableResultCode; @@ -21,7 +23,7 @@ public class InMemorySingletonSpanExporter implements SpanExporter { - private static final InMemorySingletonSpanExporter INSTANCE = new InMemorySingletonSpanExporter(InMemorySpanExporter.create()); + public static final InMemorySingletonSpanExporter INSTANCE = new InMemorySingletonSpanExporter(InMemorySpanExporter.create()); private static InMemorySpanExporter delegate; @@ -62,10 +64,30 @@ private List convertSpanDataListToMockSpanDataList(List spanData.getStartEpochNanos(), spanData.getEndEpochNanos(), spanData.hasEnded(), - spanData.getName() + spanData.getName(), + getAttributes(spanData) ) ) .collect(Collectors.toList()); return mockSpanDataList; } + + private Map getAttributes(SpanData spanData) { + if (spanData.getAttributes() != null) { + return spanData.getAttributes() + .asMap() + .entrySet() + .stream() + .collect(Collectors.toMap(e -> e.getKey().getKey(), e -> e.getValue())); + } else { + return Collections.emptyMap(); + } + } + + /** + * Clears the state. 
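+ * Tests call this before making assertions so spans left over from a previous run are not counted.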
+ */ + public void reset() { + delegate.reset(); + } } diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java index 476a5a9cabdc7..45ed140e1be94 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerDisabledSanityIT.java @@ -12,6 +12,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; import org.opensearch.telemetry.OTelTelemetrySettings; import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.test.OpenSearchIntegTestCase; @@ -63,6 +64,8 @@ public void testSanityCheckWhenTracingDisabled() throws Exception { ensureGreen(); refresh(); + InMemorySingletonSpanExporter exporter = InMemorySingletonSpanExporter.INSTANCE; + exporter.reset(); // Make the search call; client.prepareSearch().setQuery(queryStringQuery("fox")).get(); @@ -70,7 +73,6 @@ public void testSanityCheckWhenTracingDisabled() throws Exception { // Sleep for about 3s to wait for traces are published (the delay is 1s) Thread.sleep(3000); - InMemorySingletonSpanExporter exporter = InMemorySingletonSpanExporter.create(); assertTrue(exporter.getFinishedSpanItems().isEmpty()); } diff --git a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java index 9f99099e85c9f..f07f2b308e801 100644 --- a/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java +++ b/plugins/telemetry-otel/src/internalClusterTest/java/org/opensearch/telemetry/tracing/TelemetryTracerEnabledSanityIT.java @@ -12,8 +12,10 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.plugins.Plugin; +import org.opensearch.telemetry.IntegrationTestOTelTelemetryPlugin; import org.opensearch.telemetry.OTelTelemetrySettings; import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.telemetry.tracing.TelemetryValidators; import org.opensearch.test.telemetry.tracing.validators.AllSpansAreEndedProperly; @@ -38,6 +40,7 @@ protected Settings nodeSettings(int nodeOrdinal) { "org.opensearch.telemetry.tracing.InMemorySingletonSpanExporter" ) .put(OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .put(TelemetrySettings.TRACER_SAMPLER_PROBABILITY.getKey(), 1.0d) .build(); } @@ -52,13 +55,9 @@ protected boolean addMockTelemetryPlugin() { } public void testSanityChecksWhenTracingEnabled() throws Exception { - Client client = client(); + Client client = internalCluster().clusterManagerClient(); // ENABLE TRACING - client.admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true)) - .get(); + updateTelemetrySetting(client, true); // Create Index and ingest data 
String indexName = "test-index-11"; @@ -70,9 +69,12 @@ public void testSanityChecksWhenTracingEnabled() throws Exception { ensureGreen(); refresh(); - // Make the search calls; - client.prepareSearch().setQuery(queryStringQuery("fox")).get(); - client.prepareSearch().setQuery(queryStringQuery("jumps")).get(); + // Make the search calls; adding the searchType and PreFilterShardSize to make the query path predictable across all the runs. + client.prepareSearch().setSearchType("query_then_fetch").setPreFilterShardSize(3).setQuery(queryStringQuery("fox")).get(); + client.prepareSearch().setSearchType("query_then_fetch").setPreFilterShardSize(3).setQuery(queryStringQuery("jumps")).get(); + + ensureGreen(); + refresh(); // Sleep for about 3s to wait for traces are published, delay is (the delay is 1s). Thread.sleep(3000); @@ -81,15 +83,21 @@ public void testSanityChecksWhenTracingEnabled() throws Exception { Arrays.asList( new AllSpansAreEndedProperly(), new AllSpansHaveUniqueId(), - new NumberOfTraceIDsEqualToRequests(), + new NumberOfTraceIDsEqualToRequests(Attributes.create().addAttribute("action", "indices:data/read/search[phase/query]")), new TotalRootSpansEqualToRequests() ) ); - InMemorySingletonSpanExporter exporter = InMemorySingletonSpanExporter.create(); - if (!exporter.getFinishedSpanItems().isEmpty()) { - validators.validate(exporter.getFinishedSpanItems(), 2); - } + InMemorySingletonSpanExporter exporter = InMemorySingletonSpanExporter.INSTANCE; + validators.validate(exporter.getFinishedSpanItems(), 6); + } + + private static void updateTelemetrySetting(Client client, boolean value) { + client.admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), value)) + .get(); } } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelAttributesConverter.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelAttributesConverter.java similarity index 71% rename from plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelAttributesConverter.java rename to plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelAttributesConverter.java index 4d0966e6b5185..98d265e92ba3c 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelAttributesConverter.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelAttributesConverter.java @@ -6,7 +6,9 @@ * compatible open source license. */ -package org.opensearch.telemetry.tracing; +package org.opensearch.telemetry; + +import org.opensearch.telemetry.metrics.tags.Tags; import java.util.Locale; @@ -16,7 +18,7 @@ /** * Converts {@link org.opensearch.telemetry.tracing.attributes.Attributes} to OTel {@link Attributes} */ -final class OTelAttributesConverter { +public final class OTelAttributesConverter { /** * Constructor. @@ -28,7 +30,7 @@ private OTelAttributesConverter() {} * @param attributes attributes * @return otel attributes. 
*/ - static Attributes convert(org.opensearch.telemetry.tracing.attributes.Attributes attributes) { + public static Attributes convert(org.opensearch.telemetry.tracing.attributes.Attributes attributes) { AttributesBuilder attributesBuilder = Attributes.builder(); if (attributes != null) { attributes.getAttributesMap().forEach((x, y) -> addSpanAttribute(x, y, attributesBuilder)); @@ -49,4 +51,17 @@ private static void addSpanAttribute(String key, Object value, AttributesBuilder throw new IllegalArgumentException(String.format(Locale.ROOT, "Span attribute value %s type not supported", value)); } } + + /** + * Attribute converter. + * @param tags attributes + * @return otel attributes. + */ + public static Attributes convert(Tags tags) { + AttributesBuilder attributesBuilder = Attributes.builder(); + if (tags != null) { + tags.getTagsMap().forEach((x, y) -> addSpanAttribute(x, y, attributesBuilder)); + } + return attributesBuilder.build(); + } } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java index a1ca3adf4d2a2..297ae8873636f 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java @@ -8,28 +8,36 @@ package org.opensearch.telemetry; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.TelemetryPlugin; -import org.opensearch.telemetry.metrics.MetricsTelemetry; import org.opensearch.telemetry.tracing.OTelResourceProvider; import org.opensearch.telemetry.tracing.OTelTelemetry; -import org.opensearch.telemetry.tracing.OTelTracingTelemetry; import java.util.Arrays; import java.util.List; import java.util.Optional; +import io.opentelemetry.sdk.OpenTelemetrySdk; + /** * Telemetry plugin based on Otel */ public class OTelTelemetryPlugin extends Plugin implements TelemetryPlugin { + /** + * Instrumentation scope name. 
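+ * Passed to the OTel tracer and meter providers, so all instruments created by this plugin share a single scope.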
+ */ + public static final String INSTRUMENTATION_SCOPE_NAME = "org.opensearch.telemetry"; + static final String OTEL_TRACER_NAME = "otel"; private final Settings settings; + private RefCountedReleasable refCountedOpenTelemetry; + /** * Creates Otel plugin * @param settings cluster settings @@ -44,13 +52,23 @@ public List<Setting<?>> getSettings() { OTelTelemetrySettings.TRACER_EXPORTER_BATCH_SIZE_SETTING, OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING, OTelTelemetrySettings.TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING, - OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING + OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING, + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING ); } @Override - public Optional getTelemetry(TelemetrySettings settings) { - return Optional.of(telemetry()); + public Optional getTelemetry(TelemetrySettings telemetrySettings) { + initializeOpenTelemetrySdk(telemetrySettings); + return Optional.of(telemetry(telemetrySettings)); + } + + private void initializeOpenTelemetrySdk(TelemetrySettings telemetrySettings) { + if (refCountedOpenTelemetry != null) { + return; + } + OpenTelemetrySdk openTelemetrySdk = OTelResourceProvider.get(telemetrySettings, settings); + refCountedOpenTelemetry = new RefCountedReleasable<>("openTelemetry", openTelemetrySdk, openTelemetrySdk::close); } @Override @@ -58,9 +76,15 @@ public String getName() { return OTEL_TRACER_NAME; } - private Telemetry telemetry() { - return new OTelTelemetry(new OTelTracingTelemetry(OTelResourceProvider.get(settings)), new MetricsTelemetry() { - }); + private Telemetry telemetry(TelemetrySettings telemetrySettings) { + return new OTelTelemetry(refCountedOpenTelemetry); + } + + @Override + public void close() { + if (refCountedOpenTelemetry != null) { + refCountedOpenTelemetry.close(); + } + } } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java index 59c87cca22986..8e23f724b4570 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetrySettings.java @@ -11,13 +11,16 @@ import org.opensearch.SpecialPermission; import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.TimeValue; +import org.opensearch.telemetry.metrics.exporter.OTelMetricsExporterFactory; import org.opensearch.telemetry.tracing.exporter.OTelSpanExporterFactory; import java.security.AccessController; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; +import io.opentelemetry.exporter.logging.LoggingMetricExporter; import io.opentelemetry.exporter.logging.LoggingSpanExporter; +import io.opentelemetry.sdk.metrics.export.MetricExporter; import io.opentelemetry.sdk.trace.export.SpanExporter; /** @@ -83,4 +86,28 @@ private OTelTelemetrySettings() {} Setting.Property.NodeScope, Setting.Property.Final ); + + /** + * Metrics Exporter type setting. + */ + @SuppressWarnings("unchecked") + public static final Setting<Class<MetricExporter>> OTEL_METRICS_EXPORTER_CLASS_SETTING = new Setting<>( + "telemetry.otel.metrics.exporter.class", + LoggingMetricExporter.class.getName(), + className -> { + // Check we ourselves are not being called by unprivileged code.
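+ // (Same privileged class-loading pattern as OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING earlier in this file.)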
+ SpecialPermission.check(); + + try { + return AccessController.doPrivileged((PrivilegedExceptionAction<Class<MetricExporter>>) () -> { + final ClassLoader loader = OTelMetricsExporterFactory.class.getClassLoader(); + return (Class) loader.loadClass(className); + }); + } catch (PrivilegedActionException ex) { + throw new IllegalStateException("Unable to load metric exporter class: " + className, ex.getCause()); + } + }, + Setting.Property.NodeScope, + Setting.Property.Final + ); } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelCounter.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelCounter.java new file mode 100644 index 0000000000000..b72f63e027243 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelCounter.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.metrics.tags.Tags; + +import io.opentelemetry.api.metrics.DoubleCounter; + +/** + * OTel Counter + */ +class OTelCounter implements Counter { + + private final DoubleCounter otelDoubleCounter; + + /** + * Constructor + * @param otelDoubleCounter delegate counter. + */ + public OTelCounter(DoubleCounter otelDoubleCounter) { + this.otelDoubleCounter = otelDoubleCounter; + } + + @Override + public void add(double value) { + otelDoubleCounter.add(value); + } + + @Override + public void add(double value, Tags tags) { + otelDoubleCounter.add(value, OTelAttributesConverter.convert(tags)); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java new file mode 100644 index 0000000000000..6160e5106c041 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetry.java @@ -0,0 +1,74 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.concurrent.RefCountedReleasable; +import org.opensearch.telemetry.OTelTelemetryPlugin; + +import java.io.Closeable; +import java.io.IOException; +import java.security.AccessController; +import java.security.PrivilegedAction; + +import io.opentelemetry.api.metrics.DoubleCounter; +import io.opentelemetry.api.metrics.DoubleUpDownCounter; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.api.metrics.MeterProvider; +import io.opentelemetry.sdk.OpenTelemetrySdk; + +/** + * OTel implementation for {@link MetricsTelemetry} + */ +public class OTelMetricsTelemetry implements MetricsTelemetry { + private final RefCountedReleasable refCountedOpenTelemetry; + private final Meter otelMeter; + private final T meterProvider; + + /** + * Creates OTel based {@link MetricsTelemetry}. + * @param openTelemetry open telemetry.
+ * @param meterProvider {@link MeterProvider} instance + */ + public OTelMetricsTelemetry(RefCountedReleasable openTelemetry, T meterProvider) { + this.refCountedOpenTelemetry = openTelemetry; + this.refCountedOpenTelemetry.incRef(); + this.meterProvider = meterProvider; + this.otelMeter = meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME); + } + + @Override + public Counter createCounter(String name, String description, String unit) { + DoubleCounter doubleCounter = AccessController.doPrivileged( + (PrivilegedAction) () -> otelMeter.counterBuilder(name) + .setUnit(unit) + .setDescription(description) + .ofDoubles() + .build() + ); + return new OTelCounter(doubleCounter); + } + + @Override + public Counter createUpDownCounter(String name, String description, String unit) { + DoubleUpDownCounter doubleUpDownCounter = AccessController.doPrivileged( + (PrivilegedAction) () -> otelMeter.upDownCounterBuilder(name) + .setUnit(unit) + .setDescription(description) + .ofDoubles() + .build() + ); + return new OTelUpDownCounter(doubleUpDownCounter); + } + + @Override + public void close() throws IOException { + meterProvider.close(); + refCountedOpenTelemetry.close(); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelUpDownCounter.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelUpDownCounter.java new file mode 100644 index 0000000000000..2f40881996f7e --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/OTelUpDownCounter.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.metrics.tags.Tags; + +import io.opentelemetry.api.metrics.DoubleUpDownCounter; + +/** + * OTel Counter + */ +public class OTelUpDownCounter implements Counter { + + private final DoubleUpDownCounter doubleUpDownCounter; + + /** + * Constructor + * @param doubleUpDownCounter delegate counter. + */ + public OTelUpDownCounter(DoubleUpDownCounter doubleUpDownCounter) { + this.doubleUpDownCounter = doubleUpDownCounter; + } + + @Override + public void add(double value) { + doubleUpDownCounter.add(value); + } + + @Override + public void add(double value, Tags tags) { + doubleUpDownCounter.add(value, OTelAttributesConverter.convert(tags)); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java new file mode 100644 index 0000000000000..ef5a31e4003ca --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactory.java @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.metrics.exporter; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.SpecialPermission; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.OTelTelemetrySettings; + +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; +import java.lang.reflect.Method; +import java.security.AccessController; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; + +import io.opentelemetry.sdk.metrics.export.MetricExporter; + +/** + * Factory class to create the {@link MetricExporter} instance. + */ +public class OTelMetricsExporterFactory { + + private static final Logger logger = LogManager.getLogger(OTelMetricsExporterFactory.class); + + /** + * Base constructor. + */ + private OTelMetricsExporterFactory() { + + } + + /** + * Creates the {@link MetricExporter} instance based on the OTEL_METRICS_EXPORTER_CLASS_SETTING value. + * As of now, it expects the MetricExporter implementations to have a static create (or getDefault) factory method to + * instantiate the MetricExporter. + * @param settings settings. + * @return MetricExporter instance. + */ + public static MetricExporter create(Settings settings) { + Class metricExporterProviderClass = OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.get(settings); + MetricExporter metricExporter = instantiateExporter(metricExporterProviderClass); + logger.info("Successfully instantiated the Metrics MetricExporter class {}", metricExporterProviderClass); + return metricExporter; + } + + private static MetricExporter instantiateExporter(Class exporterProviderClass) { + try { + // Check we ourselves are not being called by unprivileged code. + SpecialPermission.check(); + return AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + String methodName = "create"; + String getDefaultMethod = "getDefault"; + for (Method m : exporterProviderClass.getMethods()) { + if (m.getName().equals(getDefaultMethod)) { + methodName = getDefaultMethod; + break; + } + } + try { + return (MetricExporter) MethodHandles.publicLookup() + .findStatic(exporterProviderClass, methodName, MethodType.methodType(exporterProviderClass)) + .asType(MethodType.methodType(MetricExporter.class)) + .invokeExact(); + } catch (Throwable e) { + if (e.getCause() instanceof NoSuchMethodException) { + throw new IllegalStateException("No create factory method exists in [" + exporterProviderClass.getName() + "]"); + } else { + throw new IllegalStateException( + "MetricExporter instantiation failed for class [" + exporterProviderClass.getName() + "]", + e.getCause() + ); + } + } + }); + } catch (PrivilegedActionException ex) { + throw new IllegalStateException( + "MetricExporter instantiation failed for class [" + exporterProviderClass.getName() + "]", + ex.getCause() + ); + } + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/package-info.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/package-info.java new file mode 100644 index 0000000000000..b48ec3e2336c4 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/exporter/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +/** + * This package contains classes needed for the metrics exporter. + */ +package org.opensearch.telemetry.metrics.exporter; diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/package-info.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/package-info.java new file mode 100644 index 0000000000000..803c159eb201a --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/metrics/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for metrics. + */ +package org.opensearch.telemetry.metrics; diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java index 1ec4818b8b73e..14a19f122c17b 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java @@ -9,21 +9,28 @@ package org.opensearch.telemetry.tracing; import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.exporter.OTelMetricsExporterFactory; import org.opensearch.telemetry.tracing.exporter.OTelSpanExporterFactory; +import org.opensearch.telemetry.tracing.sampler.ProbabilisticSampler; +import org.opensearch.telemetry.tracing.sampler.RequestSampler; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.util.concurrent.TimeUnit; -import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; import io.opentelemetry.context.propagation.ContextPropagators; import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; import io.opentelemetry.sdk.resources.Resource; import io.opentelemetry.sdk.trace.SdkTracerProvider; import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; import io.opentelemetry.sdk.trace.export.SpanExporter; import io.opentelemetry.sdk.trace.samplers.Sampler; -import io.opentelemetry.semconv.resource.attributes.ResourceAttributes; +import io.opentelemetry.semconv.ResourceAttributes; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_BATCH_SIZE_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING; @@ -37,15 +44,18 @@ private OTelResourceProvider() {} /** * Creates OpenTelemetry instance with default configuration + * @param telemetrySettings telemetry settings * @param settings cluster settings - * @return OpenTelemetry instance + * @return OpenTelemetrySdk instance */ - public static OpenTelemetry get(Settings settings) { - return get( - settings, - OTelSpanExporterFactory.create(settings), - ContextPropagators.create(W3CTraceContextPropagator.getInstance()), - Sampler.alwaysOn() + public static OpenTelemetrySdk get(TelemetrySettings telemetrySettings, Settings settings) { + return AccessController.doPrivileged( + (PrivilegedAction) () -> get( + settings, + OTelSpanExporterFactory.create(settings), +
ContextPropagators.create(W3CTraceContextPropagator.getInstance()), + Sampler.parentBased(new RequestSampler(new ProbabilisticSampler(telemetrySettings))) + ) ); } @@ -55,17 +65,46 @@ public static OpenTelemetry get(Settings settings) { * @param spanExporter span exporter instance * @param contextPropagators context propagator instance * @param sampler sampler instance - * @return Opentelemetry instance + * @return OpenTelemetrySdk instance */ - public static OpenTelemetry get(Settings settings, SpanExporter spanExporter, ContextPropagators contextPropagators, Sampler sampler) { + public static OpenTelemetrySdk get( + Settings settings, + SpanExporter spanExporter, + ContextPropagators contextPropagators, + Sampler sampler + ) { Resource resource = Resource.create(Attributes.of(ResourceAttributes.SERVICE_NAME, "OpenSearch")); - SdkTracerProvider sdkTracerProvider = SdkTracerProvider.builder() + SdkTracerProvider sdkTracerProvider = createSdkTracerProvider(settings, spanExporter, sampler, resource); + SdkMeterProvider sdkMeterProvider = createSdkMetricProvider(settings, resource); + return OpenTelemetrySdk.builder() + .setTracerProvider(sdkTracerProvider) + .setMeterProvider(sdkMeterProvider) + .setPropagators(contextPropagators) + .buildAndRegisterGlobal(); + } + + private static SdkMeterProvider createSdkMetricProvider(Settings settings, Resource resource) { + return SdkMeterProvider.builder() + .setResource(resource) + .registerMetricReader( + PeriodicMetricReader.builder(OTelMetricsExporterFactory.create(settings)) + .setInterval(TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING.get(settings).getSeconds(), TimeUnit.SECONDS) + .build() + ) + .build(); + } + + private static SdkTracerProvider createSdkTracerProvider( + Settings settings, + SpanExporter spanExporter, + Sampler sampler, + Resource resource + ) { + return SdkTracerProvider.builder() .addSpanProcessor(spanProcessor(settings, spanExporter)) .setResource(resource) .setSampler(sampler) .build(); - - return OpenTelemetrySdk.builder().setTracerProvider(sdkTracerProvider).setPropagators(contextPropagators).buildAndRegisterGlobal(); } private static BatchSpanProcessor spanProcessor(Settings settings, SpanExporter spanExporter) { diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java index ba63df4ae47a1..fc917968579e1 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java @@ -19,6 +19,12 @@ class OTelSpan extends AbstractSpan { private final Span delegateSpan; + /** + * Constructor + * @param spanName span name + * @param span the delegate span + * @param parentSpan the parent span + */ public OTelSpan(String spanName, Span span, org.opensearch.telemetry.tracing.Span parentSpan) { super(spanName, parentSpan); this.delegateSpan = span; @@ -51,7 +57,9 @@ public void addAttribute(String key, Boolean value) { @Override public void setError(Exception exception) { - delegateSpan.setStatus(StatusCode.ERROR, exception.getMessage()); + if (exception != null) { + delegateSpan.setStatus(StatusCode.ERROR, exception.getMessage()); + } } @Override diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpanKindConverter.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpanKindConverter.java new file mode 100644 index 
0000000000000..4edb837082126 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpanKindConverter.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import io.opentelemetry.api.trace.SpanKind; + +/** + * Converts {@link org.opensearch.telemetry.tracing.SpanKind} to OTel {@link SpanKind} + */ +final class OTelSpanKindConverter { + + /** + * Constructor. + */ + private OTelSpanKindConverter() {} + + /** + * SpanKind converter. + * @param spanKind span kind. + * @return otel span kind. + */ + static SpanKind convert(org.opensearch.telemetry.tracing.SpanKind spanKind) { + if (spanKind == null) { + return SpanKind.INTERNAL; + } else { + switch (spanKind) { + case CLIENT: + return SpanKind.CLIENT; + case SERVER: + return SpanKind.SERVER; + case INTERNAL: + default: + return SpanKind.INTERNAL; + } + } + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java index 282fabd43346b..0c697d2cc5e8c 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java @@ -8,34 +8,39 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.concurrent.RefCountedReleasable; import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.metrics.MetricsTelemetry; +import org.opensearch.telemetry.metrics.OTelMetricsTelemetry; + +import io.opentelemetry.sdk.OpenTelemetrySdk; /** * Otel implementation of Telemetry */ public class OTelTelemetry implements Telemetry { - private final TracingTelemetry tracingTelemetry; - private final MetricsTelemetry metricsTelemetry; + private final RefCountedReleasable refCountedOpenTelemetry; /** * Creates Telemetry instance - * @param tracingTelemetry tracing telemetry - * @param metricsTelemetry metrics telemetry + * @param refCountedOpenTelemetry open telemetry.
*/ - public OTelTelemetry(TracingTelemetry tracingTelemetry, MetricsTelemetry metricsTelemetry) { - this.tracingTelemetry = tracingTelemetry; - this.metricsTelemetry = metricsTelemetry; + public OTelTelemetry(RefCountedReleasable refCountedOpenTelemetry) { + this.refCountedOpenTelemetry = refCountedOpenTelemetry; } @Override public TracingTelemetry getTracingTelemetry() { - return tracingTelemetry; + return new OTelTracingTelemetry<>(refCountedOpenTelemetry, refCountedOpenTelemetry.get().getSdkTracerProvider()); } @Override public MetricsTelemetry getMetricsTelemetry() { - return metricsTelemetry; + return new OTelMetricsTelemetry<>(refCountedOpenTelemetry, refCountedOpenTelemetry.get().getSdkMeterProvider()); } } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java index 739a6367ccb2e..f8fe885ee450c 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java @@ -8,7 +8,12 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.core.common.Strings; + +import java.util.Collections; +import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.function.BiConsumer; import io.opentelemetry.api.OpenTelemetry; @@ -32,8 +37,12 @@ public OTelTracingContextPropagator(OpenTelemetry openTelemetry) { } @Override - public Span extract(Map props) { + public Optional extract(Map props) { Context context = openTelemetry.getPropagators().getTextMapPropagator().extract(Context.current(), props, TEXT_MAP_GETTER); + return Optional.ofNullable(getPropagatedSpan(context)); + } + + private static OTelPropagatedSpan getPropagatedSpan(Context context) { if (context != null) { io.opentelemetry.api.trace.Span span = io.opentelemetry.api.trace.Span.fromContext(context); return new OTelPropagatedSpan(span); @@ -41,6 +50,12 @@ public Span extract(Map props) { return null; } + @Override + public Optional extractFromHeaders(Map<String, List<String>> headers) { + Context context = openTelemetry.getPropagators().getTextMapPropagator().extract(Context.current(), headers, HEADER_TEXT_MAP_GETTER); + return Optional.ofNullable(getPropagatedSpan(context)); + } + @Override public void inject(Span currentSpan, BiConsumer setter) { openTelemetry.getPropagators().getTextMapPropagator().inject(context((OTelSpan) currentSpan), setter, TEXT_MAP_SETTER); @@ -72,4 +87,23 @@ public String get(Map headers, String key) { } }; + private static final TextMapGetter<Map<String, List<String>>> HEADER_TEXT_MAP_GETTER = new TextMapGetter<>() { + @Override + public Iterable keys(Map<String, List<String>> headers) { + if (headers != null) { + return headers.keySet(); + } else { + return Collections.emptySet(); + } + } + + @Override + public String get(Map<String, List<String>> headers, String key) { + if (headers != null && headers.containsKey(key)) { + return Strings.collectionToCommaDelimitedString(headers.get(key)); + } + return null; + } + }; + } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java index 9a3a10e63503e..af39617a8c744 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java +++
b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java @@ -8,65 +8,76 @@ package org.opensearch.telemetry.tracing; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.telemetry.tracing.attributes.Attributes; +import org.opensearch.common.concurrent.RefCountedReleasable; +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.OTelTelemetryPlugin; import java.io.Closeable; import java.io.IOException; -import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.trace.TracerProvider; import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.OpenTelemetrySdk; /** * OTel based Telemetry provider */ -public class OTelTracingTelemetry implements TracingTelemetry { - - private static final Logger logger = LogManager.getLogger(OTelTracingTelemetry.class); - private final OpenTelemetry openTelemetry; +public class OTelTracingTelemetry implements TracingTelemetry { + private final RefCountedReleasable refCountedOpenTelemetry; + private final T tracerProvider; private final io.opentelemetry.api.trace.Tracer otelTracer; /** - * Creates OTel based Telemetry - * @param openTelemetry OpenTelemetry instance + * Creates OTel based {@link TracingTelemetry} + * @param refCountedOpenTelemetry OpenTelemetry instance + * @param tracerProvider {@link TracerProvider} instance. */ - public OTelTracingTelemetry(OpenTelemetry openTelemetry) { - this.openTelemetry = openTelemetry; - this.otelTracer = openTelemetry.getTracer("os-tracer"); - + public OTelTracingTelemetry(RefCountedReleasable refCountedOpenTelemetry, T tracerProvider) { + this.refCountedOpenTelemetry = refCountedOpenTelemetry; + this.refCountedOpenTelemetry.incRef(); + this.tracerProvider = tracerProvider; + this.otelTracer = tracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME); } @Override - public void close() { - try { - ((Closeable) openTelemetry).close(); - } catch (IOException e) { - logger.warn("Error while closing Opentelemetry", e); - } + public void close() throws IOException { + tracerProvider.close(); + refCountedOpenTelemetry.close(); } @Override - public Span createSpan(String spanName, Span parentSpan, Attributes attributes) { - return createOtelSpan(spanName, parentSpan, attributes); + public Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan) { + return createOtelSpan(spanCreationContext, parentSpan); } @Override public TracingContextPropagator getContextPropagator() { - return new OTelTracingContextPropagator(openTelemetry); + return new OTelTracingContextPropagator(refCountedOpenTelemetry.get()); } - private Span createOtelSpan(String spanName, Span parentSpan, Attributes attributes) { - io.opentelemetry.api.trace.Span otelSpan = otelSpan(spanName, parentSpan, OTelAttributesConverter.convert(attributes)); - return new OTelSpan(spanName, otelSpan, parentSpan); + private Span createOtelSpan(SpanCreationContext spanCreationContext, Span parentSpan) { + io.opentelemetry.api.trace.Span otelSpan = otelSpan( + spanCreationContext.getSpanName(), + parentSpan, + OTelAttributesConverter.convert(spanCreationContext.getAttributes()), + OTelSpanKindConverter.convert(spanCreationContext.getSpanKind()) + ); + Span newSpan = new OTelSpan(spanCreationContext.getSpanName(), otelSpan, parentSpan); + return newSpan; } - io.opentelemetry.api.trace.Span otelSpan(String spanName, Span parentOTelSpan, io.opentelemetry.api.common.Attributes attributes) { + 
io.opentelemetry.api.trace.Span otelSpan( + String spanName, + Span parentOTelSpan, + io.opentelemetry.api.common.Attributes attributes, + io.opentelemetry.api.trace.SpanKind spanKind + ) { return parentOTelSpan == null || !(parentOTelSpan instanceof OTelSpan) ? otelTracer.spanBuilder(spanName).setAllAttributes(attributes).startSpan() : otelTracer.spanBuilder(spanName) .setParent(Context.current().with(((OTelSpan) parentOTelSpan).getDelegateSpan())) .setAllAttributes(attributes) + .setSpanKind(spanKind) .startSpan(); } } diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java index cd4ffb160903b..da7ce5c47d9ca 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactory.java @@ -16,6 +16,7 @@ import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodType; +import java.lang.reflect.Method; import java.security.AccessController; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; @@ -55,9 +56,17 @@ private static SpanExporter instantiateSpanExporter(Class spanExpo // Check we ourselves are not being called by unprivileged code. SpecialPermission.check(); return AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + String methodName = "create"; + String getDefaultMethod = "getDefault"; + for (Method m : spanExporterProviderClass.getMethods()) { + if (m.getName().equals(getDefaultMethod)) { + methodName = getDefaultMethod; + break; + } + } try { return (SpanExporter) MethodHandles.publicLookup() - .findStatic(spanExporterProviderClass, "create", MethodType.methodType(spanExporterProviderClass)) + .findStatic(spanExporterProviderClass, methodName, MethodType.methodType(spanExporterProviderClass)) .asType(MethodType.methodType(SpanExporter.class)) .invokeExact(); } catch (Throwable e) { diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSampler.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSampler.java new file mode 100644 index 0000000000000..774070aa39df6 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSampler.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.sampler; + +import org.opensearch.telemetry.TelemetrySettings; + +import java.util.List; +import java.util.Objects; + +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.trace.data.LinkData; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import io.opentelemetry.sdk.trace.samplers.SamplingResult; + +/** + * ProbabilisticSampler implements a head-based sampling strategy based on provided settings. 
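+ * The sampling ratio is re-read from TelemetrySettings on each decision, so dynamic updates to the probability setting take effect without a node restart.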
+ */ +public class ProbabilisticSampler implements Sampler { + private Sampler defaultSampler; + private final TelemetrySettings telemetrySettings; + private double samplingRatio; + + /** + * Constructor + * + * @param telemetrySettings Telemetry settings. + */ + public ProbabilisticSampler(TelemetrySettings telemetrySettings) { + this.telemetrySettings = Objects.requireNonNull(telemetrySettings); + this.samplingRatio = telemetrySettings.getSamplingProbability(); + this.defaultSampler = Sampler.traceIdRatioBased(samplingRatio); + } + + Sampler getSampler() { + double newSamplingRatio = telemetrySettings.getSamplingProbability(); + if (isSamplingRatioChanged(newSamplingRatio)) { + synchronized (this) { + this.samplingRatio = newSamplingRatio; + defaultSampler = Sampler.traceIdRatioBased(samplingRatio); + } + } + return defaultSampler; + } + + private boolean isSamplingRatioChanged(double newSamplingRatio) { + return Double.compare(this.samplingRatio, newSamplingRatio) != 0; + } + + double getSamplingRatio() { + return samplingRatio; + } + + @Override + public SamplingResult shouldSample( + Context parentContext, + String traceId, + String name, + SpanKind spanKind, + Attributes attributes, + List parentLinks + ) { + return getSampler().shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + } + + @Override + public String getDescription() { + return "Probabilistic Sampler"; + } + + @Override + public String toString() { + return getDescription(); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/RequestSampler.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/RequestSampler.java new file mode 100644 index 0000000000000..9ea681370a3ec --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/RequestSampler.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.sampler; + +import java.util.List; + +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.trace.data.LinkData; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import io.opentelemetry.sdk.trace.samplers.SamplingResult; + +/** + * HeadBased sampler + */ +public class RequestSampler implements Sampler { + private final Sampler defaultSampler; + + // TODO: Pick value of TRACE from PR #9415. + private static final String TRACE = "trace"; + + /** + * Creates Head based sampler + * @param defaultSampler defaultSampler + */ + public RequestSampler(Sampler defaultSampler) { + this.defaultSampler = defaultSampler; + } + + @Override + public SamplingResult shouldSample( + Context parentContext, + String traceId, + String name, + SpanKind spanKind, + Attributes attributes, + List parentLinks + ) { + + final String trace = attributes.get(AttributeKey.stringKey(TRACE)); + + if (trace != null) { + return (Boolean.parseBoolean(trace) == true) ? 
SamplingResult.recordAndSample() : SamplingResult.drop(); + } else { + return defaultSampler.shouldSample(parentContext, traceId, name, spanKind, attributes, parentLinks); + } + + } + + @Override + public String getDescription() { + return "Request Sampler"; + } + + @Override + public String toString() { + return getDescription(); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/package-info.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/package-info.java new file mode 100644 index 0000000000000..6534b33f6177c --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/sampler/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for sampler. + */ +package org.opensearch.telemetry.tracing.sampler; diff --git a/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy b/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy index 4480cbb2bab4b..9d529ed5a2a56 100644 --- a/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy @@ -9,6 +9,9 @@ grant { permission java.lang.RuntimePermission "getClassLoader"; permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.net.NetPermission "getProxySelector"; + permission java.net.SocketPermission "*", "connect,resolve"; + permission java.util.PropertyPermission "*", "read,write"; }; diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java index 611656942860f..2fcf89947e537 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java @@ -12,12 +12,15 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.telemetry.metrics.MetricsTelemetry; +import org.opensearch.telemetry.metrics.OTelMetricsTelemetry; import org.opensearch.telemetry.tracing.OTelTracingTelemetry; import org.opensearch.telemetry.tracing.TracingTelemetry; import org.opensearch.test.OpenSearchTestCase; import org.junit.After; import org.junit.Before; +import java.io.IOException; import java.util.Arrays; import java.util.HashSet; import java.util.List; @@ -25,46 +28,57 @@ import java.util.Set; import static org.opensearch.telemetry.OTelTelemetryPlugin.OTEL_TRACER_NAME; +import static org.opensearch.telemetry.OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_BATCH_SIZE_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING; import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static 
org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; public class OTelTelemetryPluginTests extends OpenSearchTestCase { - private OTelTelemetryPlugin oTelTracerModulePlugin; + private OTelTelemetryPlugin oTelTelemetryPlugin; private Optional<Telemetry> telemetry; private TracingTelemetry tracingTelemetry; + private MetricsTelemetry metricsTelemetry; + @Before public void setup() { // TRACER_EXPORTER_DELAY_SETTING should always be less than 10 seconds because // io.opentelemetry.sdk.OpenTelemetrySdk.close waits only for 10 seconds for shutdown to complete. Settings settings = Settings.builder().put(TRACER_EXPORTER_DELAY_SETTING.getKey(), "1s").build(); - oTelTracerModulePlugin = new OTelTelemetryPlugin(settings); - telemetry = oTelTracerModulePlugin.getTelemetry(null); + oTelTelemetryPlugin = new OTelTelemetryPlugin(settings); + telemetry = oTelTelemetryPlugin.getTelemetry( + new TelemetrySettings(Settings.EMPTY, new ClusterSettings(settings, Set.of(TRACER_ENABLED_SETTING, TRACER_SAMPLER_PROBABILITY))) + ); tracingTelemetry = telemetry.get().getTracingTelemetry(); + metricsTelemetry = telemetry.get().getMetricsTelemetry(); } public void testGetTelemetry() { Set<Setting<?>> allTracerSettings = new HashSet<>(); ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS.get(List.of(FeatureFlags.TELEMETRY)).stream().forEach((allTracerSettings::add)); - assertEquals(OTEL_TRACER_NAME, oTelTracerModulePlugin.getName()); + assertEquals(OTEL_TRACER_NAME, oTelTelemetryPlugin.getName()); assertTrue(tracingTelemetry instanceof OTelTracingTelemetry); + assertTrue(metricsTelemetry instanceof OTelMetricsTelemetry); assertEquals( Arrays.asList( TRACER_EXPORTER_BATCH_SIZE_SETTING, TRACER_EXPORTER_DELAY_SETTING, TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING, - OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING + OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING, + OTEL_METRICS_EXPORTER_CLASS_SETTING ), - oTelTracerModulePlugin.getSettings() + oTelTelemetryPlugin.getSettings() ); } @After - public void cleanup() { + public void cleanup() throws IOException { tracingTelemetry.close(); + metricsTelemetry.close(); } } diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java new file mode 100644 index 0000000000000..9de575b69774a --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/OTelMetricsTelemetryTests.java @@ -0,0 +1,121 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
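For orientation before the new metrics tests: they pin down how the OpenSearch-side metrics facade maps onto the OTel SDK. A minimal usage sketch (MetricsTelemetry, Counter and Tags are the types exercised in this change; the counter name, description and tag values here are illustrative placeholders):

    MetricsTelemetry metricsTelemetry = telemetry.get().getMetricsTelemetry();
    // "requests.count" is a hypothetical counter name, not one used in this PR.
    Counter counter = metricsTelemetry.createCounter("requests.count", "Number of requests", "1");
    counter.add(1.0);
    counter.add(2.0, Tags.create().addTag("status", "200"));

Under the hood this is backed by an OTel DoubleCounter obtained via counterBuilder(...).ofDoubles(), which is exactly what the mocks in OTelMetricsTelemetryTests below verify.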
+ */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.concurrent.RefCountedReleasable; +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.OTelTelemetryPlugin; +import org.opensearch.telemetry.metrics.tags.Tags; +import org.opensearch.test.OpenSearchTestCase; + +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.metrics.DoubleCounter; +import io.opentelemetry.api.metrics.DoubleCounterBuilder; +import io.opentelemetry.api.metrics.DoubleUpDownCounter; +import io.opentelemetry.api.metrics.DoubleUpDownCounterBuilder; +import io.opentelemetry.api.metrics.LongCounterBuilder; +import io.opentelemetry.api.metrics.LongUpDownCounterBuilder; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.api.metrics.MeterProvider; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class OTelMetricsTelemetryTests extends OpenSearchTestCase { + + @SuppressWarnings({ "rawtypes", "unchecked" }) + public void testCounter() { + String counterName = "test-counter"; + String description = "test"; + String unit = "1"; + Meter mockMeter = mock(Meter.class); + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + DoubleCounter mockOTelDoubleCounter = mock(DoubleCounter.class); + LongCounterBuilder mockOTelLongCounterBuilder = mock(LongCounterBuilder.class); + DoubleCounterBuilder mockOTelDoubleCounterBuilder = mock(DoubleCounterBuilder.class); + MeterProvider meterProvider = mock(MeterProvider.class); + when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); + when(mockMeter.counterBuilder(counterName)).thenReturn(mockOTelLongCounterBuilder); + when(mockOTelLongCounterBuilder.setDescription(description)).thenReturn(mockOTelLongCounterBuilder); + when(mockOTelLongCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongCounterBuilder); + when(mockOTelLongCounterBuilder.ofDoubles()).thenReturn(mockOTelDoubleCounterBuilder); + when(mockOTelDoubleCounterBuilder.build()).thenReturn(mockOTelDoubleCounter); + + Counter counter = metricsTelemetry.createCounter(counterName, description, unit); + counter.add(1.0); + verify(mockOTelDoubleCounter).add(1.0); + Tags tags = Tags.create().addTag("test", "test"); + counter.add(2.0, tags); + verify(mockOTelDoubleCounter).add(2.0, OTelAttributesConverter.convert(tags)); + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + public void testCounterNegativeValue() { + String counterName = "test-counter"; + String description = "test"; + String unit = "1"; + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + Meter mockMeter = mock(Meter.class); + DoubleCounter mockOTelDoubleCounter = mock(DoubleCounter.class); + LongCounterBuilder mockOTelLongCounterBuilder = mock(LongCounterBuilder.class); + DoubleCounterBuilder mockOTelDoubleCounterBuilder = mock(DoubleCounterBuilder.class); + + MeterProvider meterProvider = mock(MeterProvider.class); + when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); + when(mockMeter.counterBuilder(counterName)).thenReturn(mockOTelLongCounterBuilder); + 
when(mockOTelLongCounterBuilder.setDescription(description)).thenReturn(mockOTelLongCounterBuilder); + when(mockOTelLongCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongCounterBuilder); + when(mockOTelLongCounterBuilder.ofDoubles()).thenReturn(mockOTelDoubleCounterBuilder); + when(mockOTelDoubleCounterBuilder.build()).thenReturn(mockOTelDoubleCounter); + + Counter counter = metricsTelemetry.createCounter(counterName, description, unit); + counter.add(-1.0); + verify(mockOTelDoubleCounter).add(-1.0); + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + public void testUpDownCounter() { + String counterName = "test-counter"; + String description = "test"; + String unit = "1"; + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + Meter mockMeter = mock(Meter.class); + DoubleUpDownCounter mockOTelUpDownDoubleCounter = mock(DoubleUpDownCounter.class); + LongUpDownCounterBuilder mockOTelLongUpDownCounterBuilder = mock(LongUpDownCounterBuilder.class); + DoubleUpDownCounterBuilder mockOTelDoubleUpDownCounterBuilder = mock(DoubleUpDownCounterBuilder.class); + + MeterProvider meterProvider = mock(MeterProvider.class); + when(meterProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockMeter); + MetricsTelemetry metricsTelemetry = new OTelMetricsTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + meterProvider + ); + when(mockMeter.upDownCounterBuilder(counterName)).thenReturn(mockOTelLongUpDownCounterBuilder); + when(mockOTelLongUpDownCounterBuilder.setDescription(description)).thenReturn(mockOTelLongUpDownCounterBuilder); + when(mockOTelLongUpDownCounterBuilder.setUnit(unit)).thenReturn(mockOTelLongUpDownCounterBuilder); + when(mockOTelLongUpDownCounterBuilder.ofDoubles()).thenReturn(mockOTelDoubleUpDownCounterBuilder); + when(mockOTelDoubleUpDownCounterBuilder.build()).thenReturn(mockOTelUpDownDoubleCounter); + + Counter counter = metricsTelemetry.createUpDownCounter(counterName, description, unit); + counter.add(1.0); + verify(mockOTelUpDownDoubleCounter).add(1.0); + Tags tags = Tags.create().addTag("test", "test"); + counter.add(-2.0, tags); + verify(mockOTelUpDownDoubleCounter).add((-2.0), OTelAttributesConverter.convert(tags)); + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/DummyMetricExporter.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/DummyMetricExporter.java new file mode 100644 index 0000000000000..65c52911dbef9 --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/DummyMetricExporter.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
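The exporter factory tests that follow document the loading contract for the new OTEL_METRICS_EXPORTER_CLASS_SETTING: the configured class is resolved reflectively and must expose a usable factory hook (the DummyMetricExporter below deliberately has none, so creation fails with an IllegalStateException). A rough sketch of configuring the logging exporter, lifted from the tests themselves:

    Settings settings = Settings.builder()
        .put(OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(),
            "io.opentelemetry.exporter.logging.LoggingMetricExporter")
        .build();
    MetricExporter exporter = OTelMetricsExporterFactory.create(settings);
    // exporter is a LoggingMetricExporter; OtlpGrpcMetricExporter resolves via its getDefault() method.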
+ */ + +package org.opensearch.telemetry.metrics.exporter; + +import java.util.Collection; + +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; + +public class DummyMetricExporter implements MetricExporter { + @Override + public CompletableResultCode export(Collection<MetricData> metrics) { + return null; + } + + @Override + public CompletableResultCode flush() { + return null; + } + + @Override + public CompletableResultCode shutdown() { + return null; + } + + @Override + public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { + return null; + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactoryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactoryTests.java new file mode 100644 index 0000000000000..e68da030bfb52 --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/metrics/exporter/OTelMetricsExporterFactoryTests.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics.exporter; + +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.OTelTelemetrySettings; +import org.opensearch.test.OpenSearchTestCase; + +import io.opentelemetry.exporter.logging.LoggingMetricExporter; +import io.opentelemetry.exporter.otlp.metrics.OtlpGrpcMetricExporter; +import io.opentelemetry.sdk.metrics.export.MetricExporter; + +public class OTelMetricsExporterFactoryTests extends OpenSearchTestCase { + + public void testMetricsExporterDefault() { + Settings settings = Settings.builder().build(); + MetricExporter metricExporter = OTelMetricsExporterFactory.create(settings); + assertTrue(metricExporter instanceof LoggingMetricExporter); + } + + public void testMetricsExporterLogging() { + Settings settings = Settings.builder() + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "io.opentelemetry.exporter.logging.LoggingMetricExporter" + ) + .build(); + MetricExporter metricExporter = OTelMetricsExporterFactory.create(settings); + assertTrue(metricExporter instanceof LoggingMetricExporter); + } + + public void testMetricExporterInvalid() { + Settings settings = Settings.builder().put(OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), "abc").build(); + assertThrows(IllegalArgumentException.class, () -> OTelMetricsExporterFactory.create(settings)); + } + + public void testMetricExporterNoCreateFactoryMethod() { + Settings settings = Settings.builder() + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.metrics.exporter.DummyMetricExporter" + ) + .build(); + IllegalStateException exception = assertThrows(IllegalStateException.class, () -> OTelMetricsExporterFactory.create(settings)); + assertEquals( + "MetricExporter instantiation failed for class [org.opensearch.telemetry.metrics.exporter.DummyMetricExporter]", + exception.getMessage() + ); + } + + public void testMetricExporterNonMetricExporterClass() { + Settings settings = Settings.builder() +
.put(OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), "java.lang.String") + .build(); + IllegalStateException exception = assertThrows(IllegalStateException.class, () -> OTelMetricsExporterFactory.create(settings)); + assertEquals("MetricExporter instantiation failed for class [java.lang.String]", exception.getMessage()); + assertTrue(exception.getCause() instanceof NoSuchMethodError); + + } + + public void testMetricExporterGetDefaultMethod() { + Settings settings = Settings.builder() + .put( + OTelTelemetrySettings.OTEL_METRICS_EXPORTER_CLASS_SETTING.getKey(), + "io.opentelemetry.exporter.otlp.metrics.OtlpGrpcMetricExporter" + ) + .build(); + + assertTrue(OTelMetricsExporterFactory.create(settings) instanceof OtlpGrpcMetricExporter); + } + +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelAttributesConverterTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelAttributesConverterTests.java index d992daec1b7bb..ee67384d01759 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelAttributesConverterTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelAttributesConverterTests.java @@ -8,6 +8,8 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.telemetry.OTelAttributesConverter; +import org.opensearch.telemetry.metrics.tags.Tags; import org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.test.OpenSearchTestCase; @@ -19,13 +21,13 @@ public class OTelAttributesConverterTests extends OpenSearchTestCase { public void testConverterNullAttributes() { - io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert(null); + io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert((Attributes) null); assertEquals(0, otelAttributes.size()); } public void testConverterEmptyAttributes() { Attributes attributes = Attributes.EMPTY; - io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert(null); + io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert(attributes); assertEquals(0, otelAttributes.size()); } @@ -47,4 +49,12 @@ public void testConverterMultipleAttributes() { assertEquals(4, otelAttributes.size()); otelAttributes.asMap().forEach((x, y) -> assertEquals(attributeMap.get(x.getKey()), y)); } + + public void testConverterMultipleTags() { + Tags tags = Tags.create().addTag("key1", 1l).addTag("key2", 1.0).addTag("key3", true).addTag("key4", "value4"); + Map<String, Object> tagsMap = tags.getTagsMap(); + io.opentelemetry.api.common.Attributes otelAttributes = OTelAttributesConverter.convert(tags); + assertEquals(4, otelAttributes.size()); + otelAttributes.asMap().forEach((x, y) -> assertEquals(tagsMap.get(x.getKey()), y)); + } } diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanKindConverterTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanKindConverterTests.java new file mode 100644 index 0000000000000..d07e32d00a92a --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanKindConverterTests.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.test.OpenSearchTestCase; + +import io.opentelemetry.api.trace.SpanKind; + +public class OTelSpanKindConverterTests extends OpenSearchTestCase { + + public void testSpanKindNullConverterNull() { + assertEquals(SpanKind.INTERNAL, OTelSpanKindConverter.convert(null)); + } + + public void testSpanKindConverter() { + assertEquals(SpanKind.INTERNAL, OTelSpanKindConverter.convert(org.opensearch.telemetry.tracing.SpanKind.INTERNAL)); + assertEquals(SpanKind.CLIENT, OTelSpanKindConverter.convert(org.opensearch.telemetry.tracing.SpanKind.CLIENT)); + assertEquals(SpanKind.SERVER, OTelSpanKindConverter.convert(org.opensearch.telemetry.tracing.SpanKind.SERVER)); + } + +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java index fcf7495f331af..16a3ec9493d5d 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java @@ -10,7 +10,9 @@ import org.opensearch.test.OpenSearchTestCase; +import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Map; import io.opentelemetry.api.OpenTelemetry; @@ -19,6 +21,7 @@ import io.opentelemetry.api.trace.TraceFlags; import io.opentelemetry.api.trace.TraceState; import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; +import io.opentelemetry.context.Context; import io.opentelemetry.context.propagation.ContextPropagators; import static org.mockito.Mockito.mock; @@ -48,8 +51,39 @@ public void testExtractTracerContextFromHeader() { OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); when(mockOpenTelemetry.getPropagators()).thenReturn(ContextPropagators.create(W3CTraceContextPropagator.getInstance())); TracingContextPropagator tracingContextPropagator = new OTelTracingContextPropagator(mockOpenTelemetry); - org.opensearch.telemetry.tracing.Span span = tracingContextPropagator.extract(requestHeaders); + org.opensearch.telemetry.tracing.Span span = tracingContextPropagator.extract(requestHeaders).orElse(null); assertEquals(TRACE_ID, span.getTraceId()); assertEquals(SPAN_ID, span.getSpanId()); } + + public void testExtractTracerContextFromHttpHeader() { + Map<String, List<String>> requestHeaders = new HashMap<>(); + requestHeaders.put("traceparent", Arrays.asList("00-" + TRACE_ID + "-" + SPAN_ID + "-00")); + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + when(mockOpenTelemetry.getPropagators()).thenReturn(ContextPropagators.create(W3CTraceContextPropagator.getInstance())); + TracingContextPropagator tracingContextPropagator = new OTelTracingContextPropagator(mockOpenTelemetry); + org.opensearch.telemetry.tracing.Span span = tracingContextPropagator.extractFromHeaders(requestHeaders).get(); + assertEquals(TRACE_ID, span.getTraceId()); + assertEquals(SPAN_ID, span.getSpanId()); + } + + public void testExtractTracerContextFromHttpHeaderNull() { + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + when(mockOpenTelemetry.getPropagators()).thenReturn(ContextPropagators.create(W3CTraceContextPropagator.getInstance())); + TracingContextPropagator tracingContextPropagator = new OTelTracingContextPropagator(mockOpenTelemetry); + org.opensearch.telemetry.tracing.Span span =
tracingContextPropagator.extractFromHeaders(null).get(); + org.opensearch.telemetry.tracing.Span propagatedSpan = new OTelPropagatedSpan(Span.fromContext(Context.root())); + assertEquals(propagatedSpan.getTraceId(), span.getTraceId()); + assertEquals(propagatedSpan.getSpanId(), span.getSpanId()); + } + + public void testExtractTracerContextFromHttpHeaderEmpty() { + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + when(mockOpenTelemetry.getPropagators()).thenReturn(ContextPropagators.create(W3CTraceContextPropagator.getInstance())); + TracingContextPropagator tracingContextPropagator = new OTelTracingContextPropagator(mockOpenTelemetry); + org.opensearch.telemetry.tracing.Span span = tracingContextPropagator.extractFromHeaders(new HashMap<>()).get(); + org.opensearch.telemetry.tracing.Span propagatedSpan = new OTelPropagatedSpan(Span.fromContext(Context.root())); + assertEquals(propagatedSpan.getTraceId(), span.getTraceId()); + assertEquals(propagatedSpan.getSpanId(), span.getSpanId()); + } } diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java index 3f46cb621a8ec..1f0c2f674e655 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java @@ -8,16 +8,16 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.concurrent.RefCountedReleasable; +import org.opensearch.telemetry.OTelTelemetryPlugin; import org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.test.OpenSearchTestCase; -import java.util.Collections; -import java.util.Map; - import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.api.common.AttributesBuilder; import io.opentelemetry.api.trace.SpanBuilder; import io.opentelemetry.api.trace.Tracer; +import io.opentelemetry.api.trace.TracerProvider; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -26,40 +26,49 @@ import static org.mockito.Mockito.when; public class OTelTracingTelemetryTests extends OpenSearchTestCase { - + @SuppressWarnings({ "rawtypes", "unchecked" }) public void testCreateSpanWithoutParent() { OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); Tracer mockTracer = mock(Tracer.class); - when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + TracerProvider mockTracerProvider = mock(TracerProvider.class); + when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer); SpanBuilder mockSpanBuilder = mock(SpanBuilder.class); when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder); when(mockSpanBuilder.setAllAttributes(any(io.opentelemetry.api.common.Attributes.class))).thenReturn(mockSpanBuilder); when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class)); - Map<String, String> attributeMap = Collections.singletonMap("name", "value"); + when(mockSpanBuilder.setSpanKind(any(io.opentelemetry.api.trace.SpanKind.class))).thenReturn(mockSpanBuilder); Attributes attributes = Attributes.create().addAttribute("name", "value"); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); - Span span = tracingTelemetry.createSpan("span_name", null, attributes); - + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new
RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); + Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), null); verify(mockSpanBuilder, never()).setParent(any()); verify(mockSpanBuilder).setAllAttributes(createAttribute(attributes)); assertNull(span.getParentSpan()); } + @SuppressWarnings({ "rawtypes", "unchecked" }) public void testCreateSpanWithParent() { OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); Tracer mockTracer = mock(Tracer.class); - when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + TracerProvider mockTracerProvider = mock(TracerProvider.class); + when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer); SpanBuilder mockSpanBuilder = mock(SpanBuilder.class); when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder); when(mockSpanBuilder.setParent(any())).thenReturn(mockSpanBuilder); when(mockSpanBuilder.setAllAttributes(any(io.opentelemetry.api.common.Attributes.class))).thenReturn(mockSpanBuilder); when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class)); + when(mockSpanBuilder.setSpanKind(any(io.opentelemetry.api.trace.SpanKind.class))).thenReturn(mockSpanBuilder); Span parentSpan = new OTelSpan("parent_span", mock(io.opentelemetry.api.trace.Span.class), null); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); Attributes attributes = Attributes.create().addAttribute("name", 1l); - Span span = tracingTelemetry.createSpan("span_name", parentSpan, attributes); + Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), parentSpan); verify(mockSpanBuilder).setParent(any()); verify(mockSpanBuilder).setAllAttributes(createAttributeLong(attributes)); @@ -68,25 +77,31 @@ public void testCreateSpanWithParent() { assertEquals("parent_span", span.getParentSpan().getSpanName()); } + @SuppressWarnings({ "rawtypes", "unchecked" }) public void testCreateSpanWithParentWithMultipleAttributes() { OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); Tracer mockTracer = mock(Tracer.class); - when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + TracerProvider mockTracerProvider = mock(TracerProvider.class); + when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer); SpanBuilder mockSpanBuilder = mock(SpanBuilder.class); when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder); when(mockSpanBuilder.setParent(any())).thenReturn(mockSpanBuilder); when(mockSpanBuilder.setAllAttributes(any(io.opentelemetry.api.common.Attributes.class))).thenReturn(mockSpanBuilder); when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class)); + when(mockSpanBuilder.setSpanKind(any(io.opentelemetry.api.trace.SpanKind.class))).thenReturn(mockSpanBuilder); Span parentSpan = new OTelSpan("parent_span", mock(io.opentelemetry.api.trace.Span.class), null); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); Attributes attributes = Attributes.create() 
.addAttribute("key1", 1l) .addAttribute("key2", 2.0) .addAttribute("key3", true) .addAttribute("key4", "key4"); - Span span = tracingTelemetry.createSpan("span_name", parentSpan, attributes); + Span span = tracingTelemetry.createSpan(SpanCreationContext.internal().name("span_name").attributes(attributes), parentSpan); io.opentelemetry.api.common.Attributes otelAttributes = io.opentelemetry.api.common.Attributes.builder() .put("key1", 1l) @@ -113,12 +128,17 @@ private io.opentelemetry.api.common.Attributes createAttributeLong(Attributes at return attributesBuilder.build(); } + @SuppressWarnings({ "rawtypes", "unchecked" }) public void testGetContextPropagator() { OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); Tracer mockTracer = mock(Tracer.class); - when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + TracerProvider mockTracerProvider = mock(TracerProvider.class); + when(mockTracerProvider.get(OTelTelemetryPlugin.INSTRUMENTATION_SCOPE_NAME)).thenReturn(mockTracer); - TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry( + new RefCountedReleasable("telemetry", mockOpenTelemetry, () -> {}), + mockTracerProvider + ); assertTrue(tracingTelemetry.getContextPropagator() instanceof OTelTracingContextPropagator); } diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/DummySpanExporterWithGetDefault.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/DummySpanExporterWithGetDefault.java new file mode 100644 index 0000000000000..225cfa6ab2d1a --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/DummySpanExporterWithGetDefault.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
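Worth flagging from the tracing telemetry tests above: span creation now goes through a SpanCreationContext builder instead of positional arguments. A minimal sketch reusing the same values as the tests (parentSpan may be null for a root span):

    SpanCreationContext context = SpanCreationContext.internal()
        .name("span_name")
        .attributes(Attributes.create().addAttribute("name", "value"));
    Span span = tracingTelemetry.createSpan(context, parentSpan);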
+ */ + +package org.opensearch.telemetry.tracing.exporter; + +import java.util.Collection; + +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.opentelemetry.sdk.trace.export.SpanExporter; + +public class DummySpanExporterWithGetDefault implements SpanExporter { + + public static DummySpanExporterWithGetDefault getDefault() { + return new DummySpanExporterWithGetDefault(); + } + + @Override + public CompletableResultCode export(Collection spans) { + return null; + } + + @Override + public CompletableResultCode flush() { + return null; + } + + @Override + public CompletableResultCode shutdown() { + return null; + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactoryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactoryTests.java index d9a86ccd3180b..d71aef9366e21 100644 --- a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactoryTests.java +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/exporter/OTelSpanExporterFactoryTests.java @@ -63,4 +63,15 @@ public void testSpanExporterNonSpanExporterClass() { } + public void testSpanExporterGetDefaultMethod() { + Settings settings = Settings.builder() + .put( + OTelTelemetrySettings.OTEL_TRACER_SPAN_EXPORTER_CLASS_SETTING.getKey(), + "org.opensearch.telemetry.tracing.exporter.DummySpanExporterWithGetDefault" + ) + .build(); + + assertTrue(OTelSpanExporterFactory.create(settings) instanceof DummySpanExporterWithGetDefault); + } + } diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSamplerTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSamplerTests.java new file mode 100644 index 0000000000000..639dc341ef0db --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/ProbabilisticSamplerTests.java @@ -0,0 +1,64 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing.sampler; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Set; + +import io.opentelemetry.sdk.trace.samplers.Sampler; + +import static org.opensearch.telemetry.OTelTelemetrySettings.TRACER_EXPORTER_DELAY_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_ENABLED_SETTING; +import static org.opensearch.telemetry.TelemetrySettings.TRACER_SAMPLER_PROBABILITY; + +public class ProbabilisticSamplerTests extends OpenSearchTestCase { + + // When ProbabilisticSampler is created with TelemetrySettings as null + public void testProbabilisticSamplerWithNullSettings() { + // Verify that the constructor throws NullPointerException when given null settings + assertThrows(NullPointerException.class, () -> { new ProbabilisticSampler(null); }); + } + + public void testDefaultGetSampler() { + Settings settings = Settings.builder().put(TRACER_EXPORTER_DELAY_SETTING.getKey(), "1s").build(); + TelemetrySettings telemetrySettings = new TelemetrySettings( + Settings.EMPTY, + new ClusterSettings(settings, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)) + ); + + // Probabilistic Sampler + ProbabilisticSampler probabilisticSampler = new ProbabilisticSampler(telemetrySettings); + + assertNotNull(probabilisticSampler.getSampler()); + assertEquals(0.01, probabilisticSampler.getSamplingRatio(), 0.0d); + } + + public void testGetSamplerWithUpdatedSamplingRatio() { + Settings settings = Settings.builder().put(TRACER_EXPORTER_DELAY_SETTING.getKey(), "1s").build(); + TelemetrySettings telemetrySettings = new TelemetrySettings( + Settings.EMPTY, + new ClusterSettings(settings, Set.of(TRACER_SAMPLER_PROBABILITY, TRACER_ENABLED_SETTING)) + ); + + // Probabilistic Sampler + ProbabilisticSampler probabilisticSampler = new ProbabilisticSampler(telemetrySettings); + assertEquals(0.01d, probabilisticSampler.getSamplingRatio(), 0.0d); + + telemetrySettings.setSamplingProbability(0.02); + + // Need to call getSampler() so it picks up the updated sampling ratio + Sampler updatedProbabilisticSampler = probabilisticSampler.getSampler(); + assertEquals(0.02, probabilisticSampler.getSamplingRatio(), 0.0d); + } + +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/RequestSamplerTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/RequestSamplerTests.java new file mode 100644 index 0000000000000..facf04623ec46 --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/sampler/RequestSamplerTests.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
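Taken together with the RequestSamplerTests that follow, the intended composition of the two new samplers is a chain: RequestSampler short-circuits on an explicit "trace" attribute and otherwise defers to its delegate, typically the ProbabilisticSampler whose ratio follows TRACER_SAMPLER_PROBABILITY. A sketch of the assumed wiring (the actual hookup lives in the plugin, outside this hunk):

    Sampler sampler = new RequestSampler(new ProbabilisticSampler(telemetrySettings));
    // After telemetrySettings.setSamplingProbability(0.02), the next call into
    // shouldSample() rebuilds the ratio-based delegate, as the tests above verify.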
+ */ + +package org.opensearch.telemetry.tracing.sampler; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Collections; + +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.SpanKind; +import io.opentelemetry.context.Context; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import io.opentelemetry.sdk.trace.samplers.SamplingResult; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class RequestSamplerTests extends OpenSearchTestCase { + + public void testShouldSampleWithTraceAttributeAsTrue() { + + // Create a mock default sampler + Sampler defaultSampler = mock(Sampler.class); + when(defaultSampler.shouldSample(any(), anyString(), anyString(), any(), any(), any())).thenReturn(SamplingResult.drop()); + + // Create an instance of RequestSampler with the mock default sampler + RequestSampler requestSampler = new RequestSampler(defaultSampler); + + // Create a mock Context and Attributes + Context parentContext = mock(Context.class); + Attributes attributes = Attributes.of(AttributeKey.stringKey("trace"), "true"); + + // Call shouldSample on RequestSampler + SamplingResult result = requestSampler.shouldSample( + parentContext, + "traceId", + "spanName", + SpanKind.INTERNAL, + attributes, + Collections.emptyList() + ); + + assertEquals(SamplingResult.recordAndSample(), result); + + // Verify that the default sampler's shouldSample method was not called + verify(defaultSampler, never()).shouldSample(any(), anyString(), anyString(), any(), any(), any()); + } + + public void testShouldSampleWithoutTraceAttribute() { + + // Create a mock default sampler + Sampler defaultSampler = mock(Sampler.class); + when(defaultSampler.shouldSample(any(), anyString(), anyString(), any(), any(), any())).thenReturn( + SamplingResult.recordAndSample() + ); + + // Create an instance of RequestSampler with the mock default sampler + RequestSampler requestSampler = new RequestSampler(defaultSampler); + + // Create a mock Context and Attributes + Context parentContext = mock(Context.class); + Attributes attributes = Attributes.empty(); + + // Call shouldSample on RequestSampler + SamplingResult result = requestSampler.shouldSample( + parentContext, + "traceId", + "spanName", + SpanKind.INTERNAL, + attributes, + Collections.emptyList() + ); + + // Verify that RequestSampler returned SamplingResult.recordAndSample() + assertEquals(SamplingResult.recordAndSample(), result); + + // Verify that the default sampler's shouldSample method was called + verify(defaultSampler).shouldSample(any(), anyString(), anyString(), any(), any(), any()); + } + +} diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..aaf2e35302d77 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +39b05d2d4027971bf99111a9be1d7035a116bb55 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.96.Final.jar.sha1 deleted file mode 100644 index 7abdb33dc79a2..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.96.Final.jar.sha1 +++ /dev/null
@@ -1 +0,0 @@ -4b80fffbe77485b457bf844289bf1801f61b9e91 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..a77333ea8ae47 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +9c3c71e7cf3b8ce3bfc9fa52a524b9ca7ddf259c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.96.Final.jar.sha1 deleted file mode 100644 index 8fdb32be1de0b..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9cfe430f8b14e7ba86969d8e1126aa0aae4d18f0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..6f26bf4e6a9b5 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +992623e7d8f2d96e41faf1687bb963f5433e3517 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.96.Final.jar.sha1 deleted file mode 100644 index dfb0cf39463e2..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a4d0d95df5026965c454902ef3d6d84b81f89626 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..d2ff72db60d1f --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +847f942381145de23f21c836d05b0677474271d3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.96.Final.jar.sha1 deleted file mode 100644 index 85b5f52749671..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d10c167623cbc471753f950846df241d1021655c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..f12a6046e96d0 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +4c0acdb8bb73647ebb3847ac2d503d53d72c02b4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.96.Final.jar.sha1 deleted file mode 100644 index fe4f48c68e78b..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7840d7523d709e02961b647546f9d9dde1699306 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..8e4179ba15942 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +fe62f9ccd41b8660d07639dbbab8ae1edd6f2720 \ No newline at end of file diff --git 
a/plugins/transport-nio/licenses/netty-resolver-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.96.Final.jar.sha1 deleted file mode 100644 index 9e93f013226cd..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0e51db5568a881e0f9b013b35617c597dc32f130 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1 new file mode 100644 index 0000000000000..ab2819da570fd --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.100.Final.jar.sha1 @@ -0,0 +1 @@ +6620fbfb47667a5eb6050e35c7b4c88000bcd77f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.96.Final.jar.sha1 deleted file mode 100644 index 707285d3d29c3..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.96.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dbd15ca244be28e1a98ed29b9d755edbfa737e02 \ No newline at end of file diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java index d25ef33c2ce29..5abd6f2710198 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpRequest.java @@ -257,7 +257,7 @@ public FullHttpRequest nettyRequest() { /** * A wrapper of {@link HttpHeaders} that implements a map to prevent copying unnecessarily. This class does not support modifications * and due to the underlying implementation, it performs case insensitive lookups of key to values. - * + *
+ * <p>
      * It is important to note that this implementation does have some downsides in that each invocation of the * {@link #values()} and {@link #entrySet()} methods will perform a copy of the values in the HttpHeaders rather than returning a * view of the underlying values. diff --git a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java index 6165df6a591d6..ecf9ad9f17f87 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/http/nio/NioHttpServerTransport.java @@ -56,6 +56,7 @@ import org.opensearch.nio.NioSocketChannel; import org.opensearch.nio.ServerChannelContext; import org.opensearch.nio.SocketChannelContext; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.nio.NioGroupFactory; import org.opensearch.transport.nio.PageAllocator; @@ -106,9 +107,10 @@ public NioHttpServerTransport( NamedXContentRegistry xContentRegistry, Dispatcher dispatcher, NioGroupFactory nioGroupFactory, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { - super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings); + super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher, clusterSettings, tracer); this.pageAllocator = new PageAllocator(pageCacheRecycler); this.nioGroupFactory = nioGroupFactory; diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java index dfa72d6d59a0d..55920bab4efd3 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java @@ -52,6 +52,7 @@ import org.opensearch.nio.NioSelector; import org.opensearch.nio.NioSocketChannel; import org.opensearch.nio.ServerChannelContext; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TcpTransport; import org.opensearch.transport.TransportSettings; @@ -84,9 +85,10 @@ protected NioTransport( PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService, - NioGroupFactory groupFactory + NioGroupFactory groupFactory, + Tracer tracer ) { - super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); + super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService, tracer); this.pageAllocator = new PageAllocator(pageCacheRecycler); this.groupFactory = groupFactory; } diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java index a3475c2ea2969..d4be876867651 100644 --- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java +++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java @@ -50,6 +50,7 @@ import org.opensearch.http.nio.NioHttpServerTransport; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; +import 
org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; @@ -90,7 +91,8 @@ public Map<String, Supplier<Transport>> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap( NIO_TRANSPORT_NAME, @@ -102,7 +104,8 @@ public Map<String, Supplier<Transport>> getTransports( pageCacheRecycler, namedWriteableRegistry, circuitBreakerService, - getNioGroupFactory(settings) + getNioGroupFactory(settings), + tracer ) ); } @@ -117,7 +120,8 @@ public Map<String, Supplier<HttpServerTransport>> getHttpTransports( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { return Collections.singletonMap( NIO_HTTP_TRANSPORT_NAME, @@ -130,7 +134,8 @@ public Map<String, Supplier<HttpServerTransport>> getHttpTransports( xContentRegistry, dispatcher, getNioGroupFactory(settings), - clusterSettings + clusterSettings, + tracer ) ); } diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java index 89e6e9ce88408..09594673de5b2 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java @@ -56,6 +56,7 @@ import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.threadpool.TestThreadPool; @@ -186,7 +187,8 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -236,7 +238,8 @@ public void testBindUnavailableAddress() { xContentRegistry(), new NullDispatcher(), new NioGroupFactory(Settings.EMPTY, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -255,7 +258,8 @@ public void testBindUnavailableAddress() { xContentRegistry(), new NullDispatcher(), new NioGroupFactory(Settings.EMPTY, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { BindHttpException bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start()); @@ -298,7 +302,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -372,7 +377,8 @@ public void dispatchBadRequest(final RestChannel channel, final
ThreadContext th xContentRegistry(), dispatcher, new NioGroupFactory(Settings.EMPTY, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -438,7 +444,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); @@ -500,7 +507,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th xContentRegistry(), dispatcher, new NioGroupFactory(settings, logger), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) ) { transport.start(); diff --git a/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java b/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java index 24cc38c17a9d1..f5d1c618f5ace 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java @@ -44,6 +44,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransportService; import org.opensearch.test.transport.StubbableTransport; import org.opensearch.transport.AbstractSimpleTransportTestCase; @@ -81,7 +82,8 @@ protected Transport build(Settings settings, final Version version, ClusterSetti new MockPageCacheRecycler(settings), namedWriteableRegistry, new NoneCircuitBreakerService(), - new NioGroupFactory(settings, logger) + new NioGroupFactory(settings, logger), + NoopTracer.INSTANCE ) { @Override diff --git a/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java index 6e21b0c45411d..c6380c69a1c95 100644 --- a/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -67,6 +67,7 @@ import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.rest.OpenSearchRestTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -118,7 +119,7 @@ private static MockTransportService startTransport( boolean success = false; final Settings s = Settings.builder().put("node.name", id).build(); ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(s); - MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, null); + 
MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, NoopTracer.INSTANCE); try { newService.registerRequestHandler(ClusterSearchShardsAction.NAME, ThreadPool.Names.SAME, ClusterSearchShardsRequest::new, (request, channel, task) -> { diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java index 1648f345104ea..12d5b98f0c13f 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java @@ -77,7 +77,7 @@ * PercolatorFieldMapper#createQueryBuilderField(...) method). Using the query builders writable contract. This test * does best effort verifying that we don't break bwc for query builders between the first previous major version and * the latest current major release. - * + *
+ * <p>
      * The queries to test are specified in json format, which turns out to work because we tend break here rarely. If the * json format of a query being tested here then feel free to change this. */ diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java index 303cca60a2182..b31706d5e2d2f 100644 --- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java @@ -68,13 +68,14 @@ public class IndexingIT extends OpenSearchRestTestCase { protected static final Version UPGRADE_FROM_VERSION = Version.fromString(System.getProperty("tests.upgrade_from_version")); + private static final String TEST_MAPPING = createTestMapping(); private int indexDocs(String index, final int idStart, final int numDocs) throws IOException { for (int i = 0; i < numDocs; i++) { final int id = idStart + i; Request request = new Request("PUT", index + "/_doc/" + id); - request.setJsonEntity("{\"test\": \"test_" + randomAlphaOfLength(2) + "\"}"); + request.setJsonEntity("{\"test\": \"test_" + randomAlphaOfLength(2) + "\", \"sortfield\": \""+ randomIntBetween(0, numDocs) + "\"}"); assertOK(client().performRequest(request)); } return numDocs; @@ -132,9 +133,10 @@ public void testIndexingWithPrimaryOnBwcNodes() throws Exception { .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .putList("index.sort.field", "sortfield") .put("index.routing.allocation.include._name", bwcNames); final String index = "test-index"; - createIndex(index, settings.build()); + createIndex(index, settings.build(), TEST_MAPPING); ensureNoInitializingShards(); // wait for all other shard activity to finish int docCount = 200; @@ -183,9 +185,10 @@ public void testIndexingWithReplicaOnBwcNodes() throws Exception { .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .putList("index.sort.field", "sortfield") .put("index.routing.allocation.exclude._name", bwcNames); final String index = "test-index"; - createIndex(index, settings.build()); + createIndex(index, settings.build(), TEST_MAPPING); ensureNoInitializingShards(); // wait for all other shard activity to finish printClusterRouting(); @@ -219,11 +222,12 @@ public void testIndexVersionPropagation() throws Exception { Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) + .putList("index.sort.field", "sortfield") .put("index.routing.allocation.include._name", bwcNames); final String index = "indexversionprop"; final int minUpdates = 5; final int maxUpdates = 10; - createIndex(index, settings.build()); + createIndex(index, settings.build(), TEST_MAPPING); try (RestClient newNodeClient = buildClient(restClientSettings(), nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { @@ -305,10 +309,11 @@ public void testSeqNoCheckpoints() throws Exception { Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2) + .putList("index.sort.field", "sortfield") 
.put("index.routing.allocation.include._name", bwcNames); final String index = "test"; - createIndex(index, settings.build()); + createIndex(index, settings.build(), TEST_MAPPING); try (RestClient newNodeClient = buildClient(restClientSettings(), nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { int numDocs = 0; @@ -387,10 +392,11 @@ public void testUpdateSnapshotStatus() throws Exception { Settings.Builder settings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), between(5, 10)) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) + .putList("index.sort.field", "sortfield") .put("index.routing.allocation.include._name", bwcNames); final String index = "test-snapshot-index"; - createIndex(index, settings.build()); + createIndex(index, settings.build(), TEST_MAPPING); indexDocs(index, 0, between(50, 100)); ensureGreen(index); assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); @@ -424,7 +430,8 @@ public void testSyncedFlushTransition() throws Exception { createIndex(index, Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) - .put("index.routing.allocation.include._name", newNodes).build()); + .putList("index.sort.field", "sortfield") + .put("index.routing.allocation.include._name", newNodes).build(), TEST_MAPPING); ensureGreen(index); indexDocs(index, randomIntBetween(0, 100), between(1, 100)); try (RestClient oldNodeClient = buildClient(restClientSettings(), @@ -669,4 +676,15 @@ public String toString() { '}'; } } + + private static String createTestMapping() { + return " \"properties\": {\n" + + " \"test\": {\n" + + " \"type\": \"text\"\n" + + " },\n" + + " \"sortfield\": {\n" + + " \"type\": \"integer\"\n" + + " }\n" + + " }"; + } } diff --git a/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java index 949369899dc82..2c28d8c0517f6 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java +++ b/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java @@ -65,7 +65,7 @@ /** * Create a simple "daemon controller", put it in the right place and check that it runs. - * + *
diff --git a/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java index 949369899dc82..2c28d8c0517f6 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java +++ b/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java @@ -65,7 +65,7 @@ /** * Create a simple "daemon controller", put it in the right place and check that it runs. - * + *

      * Extends LuceneTestCase rather than OpenSearchTestCase as OpenSearchTestCase installs a system call filter, and * that prevents the Spawner class from doing its job. Also needs to run in a separate JVM from other * tests that extend OpenSearchTestCase for the same reason. diff --git a/qa/os/build.gradle b/qa/os/build.gradle index 66c6525439dac..082ed5277575a 100644 --- a/qa/os/build.gradle +++ b/qa/os/build.gradle @@ -70,6 +70,11 @@ tasks.dependenciesInfo.enabled = false tasks.thirdPartyAudit.ignoreMissingClasses() +tasks.thirdPartyAudit.ignoreViolations( + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil', + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil$1' +) + tasks.register('destructivePackagingTest') { dependsOn 'destructiveDistroTest' } diff --git a/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java b/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java index 02a613be320c2..4bb3877fc04a8 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java +++ b/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java @@ -441,7 +441,7 @@ public static Path createTempDir(String prefix) throws IOException { /** * Run the given action with a temporary copy of the config directory. - * + *

      * Files under the path passed to the action may be modified as necessary for the * test to execute, and running OpenSearch with {@link #startOpenSearch()} will * use the temporary directory. diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java b/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java index 7904d1a046916..958de24848178 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java @@ -51,7 +51,7 @@ /** * Asserts that a file at a path matches its status as Directory/File, and its owner. If on a posix system, also checks that the permission * set is what we expect. - * + *

      * This class saves information about its failed matches in instance variables and so instances should not be reused */ public class FileMatcher extends TypeSafeMatcher { diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java b/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java index 25cefa948ff10..26af39d66cad3 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/Installation.java @@ -137,7 +137,7 @@ public static Installation ofContainer(Shell sh, Distribution distribution) { /** * Returns the user that owns this installation. - * + *

      * For packages this is root, and for archives it is the user doing the installation. */ public String getOwner() { diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java b/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java index b80ae422bda9a..e9ebf28042b46 100644 --- a/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java +++ b/qa/os/src/test/java/org/opensearch/packaging/util/Packages.java @@ -194,11 +194,11 @@ private static void verifyInstallation(Installation opensearch, Distribution dis // we shell out here because java's posix file permission view doesn't support special modes assertThat(opensearch.config, file(Directory, "root", "opensearch", p750)); - assertThat(sh.run("find \"" + opensearch.config + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750")); + assertThat(sh.run("find \"" + opensearch.config + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("750")); final Path jvmOptionsDirectory = opensearch.config.resolve("jvm.options.d"); assertThat(jvmOptionsDirectory, file(Directory, "root", "opensearch", p750)); - assertThat(sh.run("find \"" + jvmOptionsDirectory + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("2750")); + assertThat(sh.run("find \"" + jvmOptionsDirectory + "\" -maxdepth 0 -printf \"%m\"").stdout, containsString("750")); Stream.of("opensearch.keystore", "opensearch.yml", "jvm.options", "log4j2.properties") .forEach(configFile -> assertThat(opensearch.config(configFile), file(File, "root", "opensearch", p660))); diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index 3f341537ee934..2a90e794f3565 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -102,11 +102,11 @@ private void waitForSearchableDocs(String index, int shardCount, int replicaCoun // Verify segment store assertBusy(() -> { - /** - * Use default tabular output and sort response based on shard,segment,primaryOrReplica columns to allow line by - * line parsing where records related to a segment (e.g. _0) are chunked together with first record belonging - * to primary while remaining *replicaCount* records belongs to replica copies - * */ + /* + Use default tabular output and sort response based on shard,segment,primaryOrReplica columns to allow line by + line parsing where records related to a segment (e.g. _0) are chunked together with first record belonging + to primary while remaining *replicaCount* records belong to replica copies + */ Request segrepStatsRequest = new Request("GET", "/_cat/segments/" + index + "?s=shard,segment,primaryOrReplica"); segrepStatsRequest.addParameter("h", "index,shard,primaryOrReplica,segment,docs.count"); Response segrepStatsResponse = client().performRequest(segrepStatsRequest); @@ -263,7 +263,8 @@ public void testIndexing() throws Exception { * This test verifies that during rolling upgrades the segment replication does not break when replica shards can * be running on older codec versions.
* - * @throws Exception + * @throws Exception if index creation fails + * @throws UnsupportedOperationException if cluster type is unknown */ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/7679") public void testIndexingWithSegRep() throws Exception { diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml index e6a2a3d52e116..c043015281a9a 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/10_pipeline_with_mustache_templates.yml @@ -208,7 +208,7 @@ id: 1 - length: { _source: 2 } - match: { _source.do_nothing: "foo" } - - match: { _source.error: "processor first_processor [remove]: field [field_to_remove] not present as part of path [field_to_remove]" } + - match: { _source.error: "processor first_processor [remove]: field [field_to_remove] doesn't exist" } --- "Test rolling up json object arrays": diff --git a/release-notes/opensearch.release-notes-2.10.0.md b/release-notes/opensearch.release-notes-2.10.0.md new file mode 100644 index 0000000000000..9d5f75d61ee2a --- /dev/null +++ b/release-notes/opensearch.release-notes-2.10.0.md @@ -0,0 +1,136 @@ +## 2023-09-08 Version 2.10.0 Release Notes + +## [2.10] + +### Added +- Add server version as REST response header ([#6583](https://github.com/opensearch-project/OpenSearch/issues/6583)) +- Start replication checkpointTimers on primary before segments upload to remote store. ([#8221](https://github.com/opensearch-project/OpenSearch/pull/8221)) +- Introduce new static cluster setting to control slice computation for concurrent segment search.
([#8847](https://github.com/opensearch-project/OpenSearch/pull/8884)) +- Add configuration for file cache size to max remote data ratio to prevent oversubscription of file cache ([#8606](https://github.com/opensearch-project/OpenSearch/pull/8606)) +- Disallow compression level to be set for default and best_compression index codecs ([#8737](https://github.com/opensearch-project/OpenSearch/pull/8737)) +- [distribution/archives] [Linux] [x64] Provide the variant of the distributions bundled with JRE ([#8195](https://github.com/opensearch-project/OpenSearch/pull/8195)) +- Prioritize replica shard movement during shard relocation ([#8875](https://github.com/opensearch-project/OpenSearch/pull/8875)) +- Introducing Default and Best Compression codecs as their algorithm name ([#9123](https://github.com/opensearch-project/OpenSearch/pull/9123)) +- Make SearchTemplateRequest implement IndicesRequest.Replaceable ([#9122](https://github.com/opensearch-project/OpenSearch/pull/9122)) +- [BWC and API enforcement] Define the initial set of annotations, their meaning and relations between them ([#9223](https://github.com/opensearch-project/OpenSearch/pull/9223)) +- [Remote Store] Add Segment download stats to remotestore stats API ([#8718](https://github.com/opensearch-project/OpenSearch/pull/8718)) +- [Remote Store] Add remote segment transfer stats on NodesStats API ([#9168](https://github.com/opensearch-project/OpenSearch/pull/9168) [#9393](https://github.com/opensearch-project/OpenSearch/pull/9393) [#9454](https://github.com/opensearch-project/OpenSearch/pull/9454)) +- [Segment Replication] Support realtime reads for GET requests ([#9212](https://github.com/opensearch-project/OpenSearch/pull/9212)) +- Allow test clusters to run with TLS ([#8900](https://github.com/opensearch-project/OpenSearch/pull/8900)) +- Add jdk.incubator.vector module support for JDK 20+ ([#8601](https://github.com/opensearch-project/OpenSearch/pull/8601)) +- [Feature] Expose term frequency in Painless script score context ([#9081](https://github.com/opensearch-project/OpenSearch/pull/9081)) +- Add support for reading partial files to HDFS repository ([#9513](https://github.com/opensearch-project/OpenSearch/issues/9513)) +- [Remote Store] Rate limiter integration for remote store uploads and downloads ([#9448](https://github.com/opensearch-project/OpenSearch/pull/9448/)) +- [BWC and API enforcement] Decorate the existing APIs with proper annotations (part 1) ([#9520](https://github.com/opensearch-project/OpenSearch/pull/9520)) +- Add support for extensions to search responses using SearchExtBuilder ([#9379](https://github.com/opensearch-project/OpenSearch/pull/9379)) +- [Remote State] Create service to publish cluster state to remote store ([#9160](https://github.com/opensearch-project/OpenSearch/pull/9160)) +- Core crypto library to perform encryption and decryption of source content ([#8466](https://github.com/opensearch-project/OpenSearch/pull/8466)) +- Expose DelimitedTermFrequencyTokenFilter to allow providing term frequencies along with terms ([#9479](https://github.com/opensearch-project/OpenSearch/pull/9479)) +- APIs for performing async blob reads and async downloads from the repository using multiple streams ([#9592](https://github.com/opensearch-project/OpenSearch/issues/9592)) +- Add concurrent segment search related metrics to node and index stats ([#9622](https://github.com/opensearch-project/OpenSearch/issues/9622)) +- Add average concurrency metric for concurrent segment search
([#9670](https://github.com/opensearch-project/OpenSearch/issues/9670)) +- Introduce cluster default remote translog buffer interval setting ([#9584](https://github.com/opensearch-project/OpenSearch/pull/9584)) +- Added encryption-sdk lib to provide encryption and decryption capabilities ([#8466](https://github.com/opensearch-project/OpenSearch/pull/8466) [#9289](https://github.com/opensearch-project/OpenSearch/pull/9289)) +- [Segment Replication] Adding segment replication statistics rolled up at index, node and cluster level ([#9709](https://github.com/opensearch-project/OpenSearch/pull/9709)) +- Added crypto-kms plugin to provide AWS KMS based key providers for encryption/decryption. ([#8465](https://github.com/opensearch-project/OpenSearch/pull/8465)) +- [Remote state] Integrate remote cluster state in publish/commit flow ([#9665](https://github.com/opensearch-project/OpenSearch/pull/9665)) +- [Remote Store] Changes to introduce repository registration during bootstrap via node attributes. ([#9105](https://github.com/opensearch-project/OpenSearch/pull/9105)) +- [Remote state] Auto restore index metadata from last known cluster state ([#9831](https://github.com/opensearch-project/OpenSearch/pull/9831)) + +### Dependencies +- Bump `org.apache.logging.log4j:log4j-core` from 2.17.1 to 2.20.0 ([#8307](https://github.com/opensearch-project/OpenSearch/pull/8307)) +- Bump `io.grpc:grpc-context` from 1.46.0 to 1.57.1 ([#8726](https://github.com/opensearch-project/OpenSearch/pull/8726), [#9145](https://github.com/opensearch-project/OpenSearch/pull/9145)) +- Bump `com.netflix.nebula:gradle-info-plugin` from 12.1.5 to 12.1.6 ([#8724](https://github.com/opensearch-project/OpenSearch/pull/8724)) +- Bump `commons-codec:commons-codec` from 1.15 to 1.16.0 ([#8725](https://github.com/opensearch-project/OpenSearch/pull/8725)) +- Bump `org.apache.zookeeper:zookeeper` from 3.8.1 to 3.9.0 ([#8844](https://github.com/opensearch-project/OpenSearch/pull/8844), [#9146](https://github.com/opensearch-project/OpenSearch/pull/9146)) +- Bump `org.gradle.test-retry` from 1.5.3 to 1.5.4 ([#8842](https://github.com/opensearch-project/OpenSearch/pull/8842)) +- Bump `com.netflix.nebula.ospackage-base` from 11.3.0 to 11.4.0 ([#8838](https://github.com/opensearch-project/OpenSearch/pull/8838)) +- Bump `com.google.http-client:google-http-client-gson` from 1.43.2 to 1.43.3 ([#8840](https://github.com/opensearch-project/OpenSearch/pull/8840)) +- OpenJDK Update (July 2023 Patch releases) ([#8869](https://github.com/opensearch-project/OpenSearch/pull/8869)) +- Bump `hadoop` libraries from 3.3.4 to 3.3.6 ([#6995](https://github.com/opensearch-project/OpenSearch/pull/6995)) +- Bump `com.gradle.enterprise` from 3.13.3 to 3.14.1 ([#8996](https://github.com/opensearch-project/OpenSearch/pull/8996)) +- Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 ([#8995](https://github.com/opensearch-project/OpenSearch/pull/8995)) +- Bump `com.google.cloud:google-cloud-core-http` from 2.21.0 to 2.21.1 ([#8999](https://github.com/opensearch-project/OpenSearch/pull/8999)) +- Bump `com.maxmind.geoip2:geoip2` from 4.0.1 to 4.1.0 ([#8998](https://github.com/opensearch-project/OpenSearch/pull/8998)) +- Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 in /plugins/repository-hdfs ([#8997](https://github.com/opensearch-project/OpenSearch/pull/8997)) +- Bump `netty` from 4.1.94.Final to 4.1.96.Final ([#9030](https://github.com/opensearch-project/OpenSearch/pull/9030)) +- Bump `com.google.jimfs:jimfs` from 1.2 to 1.3.0 
([#9080](https://github.com/opensearch-project/OpenSearch/pull/9080)) +- Bump `io.projectreactor.netty:reactor-netty-http` from 1.1.8 to 1.1.9 ([#9147](https://github.com/opensearch-project/OpenSearch/pull/9147)) +- Bump `org.apache.maven:maven-model` from 3.9.3 to 3.9.4 ([#9148](https://github.com/opensearch-project/OpenSearch/pull/9148)) +- Bump `com.azure:azure-storage-blob` from 12.22.3 to 12.23.0 ([#9231](https://github.com/opensearch-project/OpenSearch/pull/9231)) +- Bump `com.diffplug.spotless` from 6.19.0 to 6.20.0 ([#9227](https://github.com/opensearch-project/OpenSearch/pull/9227)) +- Bump `org.xerial.snappy:snappy-java` from 1.1.8.2 to 1.1.10.3 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) +- Bump `com.squareup.okhttp3:okhttp` from 4.9.3 to 4.11.0 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) +- Bump `com.squareup.okio:okio` from 2.8.0 to 3.5.0 ([#9252](https://github.com/opensearch-project/OpenSearch/pull/9252)) +- Bump `com.google.code.gson:gson` from 2.9.0 to 2.10.1 ([#9230](https://github.com/opensearch-project/OpenSearch/pull/9230)) +- Bump `lycheeverse/lychee-action` from 1.2.0 to 1.8.0 ([#9228](https://github.com/opensearch-project/OpenSearch/pull/9228)) +- Bump `snakeyaml` from 2.0 to 2.1 ([#9269](https://github.com/opensearch-project/OpenSearch/pull/9269)) +- Bump `aws-actions/configure-aws-credentials` from 1 to 2 ([#9302](https://github.com/opensearch-project/OpenSearch/pull/9302)) +- Bump `com.github.luben:zstd-jni` from 1.5.5-3 to 1.5.5-5 ([#9431](https://github.com/opensearch-project/OpenSearch/pull/9431)) +- Bump `netty` from 4.1.96.Final to 4.1.97.Final ([#9553](https://github.com/opensearch-project/OpenSearch/pull/9553)) +- Bump `io.grpc:grpc-api` from 1.57.1 to 1.57.2 ([#9578](https://github.com/opensearch-project/OpenSearch/pull/9578)) +- Add Encryption SDK dependencies ([#8466](https://github.com/opensearch-project/OpenSearch/pull/8466)) + +### Changed +- Default to mmapfs within hybridfs ([#8508](https://github.com/opensearch-project/OpenSearch/pull/8508)) +- Perform aggregation postCollection in ContextIndexSearcher after searching leaves ([#8303](https://github.com/opensearch-project/OpenSearch/pull/8303)) +- Make Span exporter configurable ([#8620](https://github.com/opensearch-project/OpenSearch/issues/8620)) +- [Refactor] StreamIO from common to core.common namespace in core lib ([#8157](https://github.com/opensearch-project/OpenSearch/pull/8157)) +- [Refactor] Remaining HPPC to java.util collections ([#8730](https://github.com/opensearch-project/OpenSearch/pull/8730)) +- Remote Segment Store Repository setting moved from `index.remote_store.repository` to `index.remote_store.segment.repository` and `cluster.remote_store.repository` to `cluster.remote_store.segment.repository` respectively for Index and Cluster level settings ([#8719](https://github.com/opensearch-project/OpenSearch/pull/8719)) +- Change InternalSignificantTerms to sum shard-level superset counts only in final reduce ([#8735](https://github.com/opensearch-project/OpenSearch/pull/8735)) +- Exclude 'benchmarks' from codecov report ([#8805](https://github.com/opensearch-project/OpenSearch/pull/8805)) +- Create separate SourceLookup instance per segment slice in SignificantTextAggregatorFactory ([#8807](https://github.com/opensearch-project/OpenSearch/pull/8807)) +- Replace the deprecated
IndexReader APIs with new storedFields() & termVectors() ([#7792](https://github.com/opensearch-project/OpenSearch/pull/7792)) +- [Remote Store] Add support to restore only unassigned shards of an index ([#8792](https://github.com/opensearch-project/OpenSearch/pull/8792)) +- Add safeguard limits for file cache during node level allocation ([#8208](https://github.com/opensearch-project/OpenSearch/pull/8208)) +- Performance improvements for BytesRefHash ([#8788](https://github.com/opensearch-project/OpenSearch/pull/8788)) +- Add support for aggregation profiler with concurrent aggregation ([#8801](https://github.com/opensearch-project/OpenSearch/pull/8801)) +- [Remove] Deprecated Fractional ByteSizeValue support #9005 ([#9005](https://github.com/opensearch-project/OpenSearch/pull/9005)) +- [Remote Store] Restrict user override for remote store index level settings ([#8812](https://github.com/opensearch-project/OpenSearch/pull/8812)) +- [Refactor] MediaTypeParser to MediaTypeParserRegistry ([#8636](https://github.com/opensearch-project/OpenSearch/pull/8636)) +- Make MultiBucketConsumerService thread safe to use across slices during search ([#9047](https://github.com/opensearch-project/OpenSearch/pull/9047)) +- Removed blocking wait in TransportGetSnapshotsAction which was exhausting generic threadpool ([#8377](https://github.com/opensearch-project/OpenSearch/pull/8377)) +- Adds support for tracing runnable scenarios ([#8831](https://github.com/opensearch-project/OpenSearch/pull/8831)) +- Change shard_size and shard_min_doc_count evaluation to happen in shard level reduce phase ([#9085](https://github.com/opensearch-project/OpenSearch/pull/9085)) +- Add attributes to startSpan methods ([#9199](https://github.com/opensearch-project/OpenSearch/pull/9199)) +- [Refactor] Task foundation classes to core library - pt 1 ([#9082](https://github.com/opensearch-project/OpenSearch/pull/9082)) +- Add support for wrapping CollectorManager with profiling during concurrent execution ([#9129](https://github.com/opensearch-project/OpenSearch/pull/9129)) +- Add base class for parameterizing the search based tests #9083 ([#9083](https://github.com/opensearch-project/OpenSearch/pull/9083)) +- Rethrow OpenSearch exception for non-concurrent path while using concurrent search ([#9177](https://github.com/opensearch-project/OpenSearch/pull/9177)) +- Improve performance of encoding composite keys in multi-term aggregations ([#9412](https://github.com/opensearch-project/OpenSearch/pull/9412)) +- Refactor Compressors from CompressorFactory to CompressorRegistry for extensibility ([#9262](https://github.com/opensearch-project/OpenSearch/pull/9262)) +- Fix sort related ITs for concurrent search ([#9466](https://github.com/opensearch-project/OpenSearch/pull/9466)) +- [Remote Store] Implicitly use replication type SEGMENT for remote store clusters ([#9264](https://github.com/opensearch-project/OpenSearch/pull/9264)) +- Add support to use trace propagated from client ([#9506](https://github.com/opensearch-project/OpenSearch/pull/9506)) +- Separate request-based and settings-based concurrent segment search controls and introduce AggregatorFactory method to determine concurrent search support
([#9469](https://github.com/opensearch-project/OpenSearch/pull/9469)) +- Redefine telemetry context restoration and propagation ([#9617](https://github.com/opensearch-project/OpenSearch/pull/9617)) +- Use non-concurrent path for sort request on timeseries index and field ([#9562](https://github.com/opensearch-project/OpenSearch/pull/9562)) +- Added sampler based on `Blanket Probabilistic Sampling rate` and `Override for on demand` ([#9621](https://github.com/opensearch-project/OpenSearch/issues/9621)) +- Decouple replication lag from logic to fail stale replicas ([#9507](https://github.com/opensearch-project/OpenSearch/pull/9507)) +- Improve performance of rounding dates in date_histogram aggregation ([#9727](https://github.com/opensearch-project/OpenSearch/pull/9727)) +- [Remote Store] Add support for Remote Translog Store stats in `_remotestore/stats/` API ([#9263](https://github.com/opensearch-project/OpenSearch/pull/9263)) +- Removing the vec file extension from INDEX_STORE_HYBRID_NIO_EXTENSIONS, to ensure no performance degradation for vector search via Lucene Engine. ([#9528](https://github.com/opensearch-project/OpenSearch/pull/9528)) +- Clean up unreferenced files on segment merge failure ([#9503](https://github.com/opensearch-project/OpenSearch/pull/9503)) +- Move zstd compression codec to external custom-codecs repository ([#9422](https://github.com/opensearch-project/OpenSearch/issues/9422)) +- [Remote Store] Add support for Remote Translog Store upload stats in `_nodes/stats/` API ([#8908](https://github.com/opensearch-project/OpenSearch/pull/8908)) +- [Remote Store] Removing feature flag to mark feature GA ([#9761](https://github.com/opensearch-project/OpenSearch/pull/9761)) + +### Removed +- Remove provision to create Remote Indices without Remote Translog Store ([#8719](https://github.com/opensearch-project/OpenSearch/pull/8719)) + +### Fixed +- Fix flaky ResourceAwareTasksTests.testBasicTaskResourceTracking test ([#8993](https://github.com/opensearch-project/OpenSearch/pull/8993)) +- Fix null_pointer_exception when creating or updating ingest pipeline ([#9259](https://github.com/opensearch-project/OpenSearch/pull/9259)) +- Fix memory leak when using Zstd Dictionary ([#9403](https://github.com/opensearch-project/OpenSearch/pull/9403)) +- Fix condition to remove index create block ([#9437](https://github.com/opensearch-project/OpenSearch/pull/9437)) +- Add support to clear archived index setting ([#9019](https://github.com/opensearch-project/OpenSearch/pull/9019)) +- Fix range reads in repository-s3 ([#9512](https://github.com/opensearch-project/OpenSearch/issues/9512)) +- [Segment Replication] Fixed bug where replica shard temporarily serves stale data during an engine reset ([#9495](https://github.com/opensearch-project/OpenSearch/pull/9495)) +- Disable shard/segment level search_after short cutting if track_total_hits != false ([#9683](https://github.com/opensearch-project/OpenSearch/pull/9683)) +- [Segment Replication] Fixed bug where bytes behind metric is not accurate ([#9686](https://github.com/opensearch-project/OpenSearch/pull/9686)) diff --git a/release-notes/opensearch.release-notes-2.11.0.md b/release-notes/opensearch.release-notes-2.11.0.md new file mode 100644
index 0000000000000..040cc053469ed --- /dev/null +++ b/release-notes/opensearch.release-notes-2.11.0.md @@ -0,0 +1,68 @@ +## 2023-10-12 Version 2.11.0 Release Notes + +## [2.11] + +### Added +- Add coordinator level stats for search latency ([#8386](https://github.com/opensearch-project/OpenSearch/issues/8386)) +- Add metrics for thread_pool task wait time ([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681)) +- Add parallel file download support for remote store based replication ([#8596](https://github.com/opensearch-project/OpenSearch/pull/8596)) +- Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694)) +- [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666)) +- Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) +- Implement Visitor Design pattern in QueryBuilder to enable the capability to traverse through the complex QueryBuilder tree. ([#10110](https://github.com/opensearch-project/OpenSearch/pull/10110)) +- Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189)) +- Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562)) +- Add unreferenced file cleanup count to merge stats ([#10204](https://github.com/opensearch-project/OpenSearch/pull/10204)) +- Configurable merge policy for index with an option to choose from LogByteSize and Tiered merge policy ([#9992](https://github.com/opensearch-project/OpenSearch/pull/9992)) +- [Remote Store] Add support to restrict creation & deletion of system repository and mutation of immutable settings of system repository ([#9839](https://github.com/opensearch-project/OpenSearch/pull/9839)) +- Improve compressed request handling ([#10261](https://github.com/opensearch-project/OpenSearch/pull/10261)) + +### Dependencies +- Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) +- Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) +- Bump `actions/checkout` from 2 to 4 ([#9968](https://github.com/opensearch-project/OpenSearch/pull/9968)) +- Bump OpenTelemetry from 1.26.0 to 1.30.1 ([#9950](https://github.com/opensearch-project/OpenSearch/pull/9950)) +- Bump `org.apache.commons:commons-compress` from 1.23.0 to 1.24.0 ([#9973](https://github.com/opensearch-project/OpenSearch/pull/9973), [#9972](https://github.com/opensearch-project/OpenSearch/pull/9972)) +- Bump `com.google.cloud:google-cloud-core-http` from 2.21.1 to 2.23.0 ([#9971](https://github.com/opensearch-project/OpenSearch/pull/9971)) +- Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) +- Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) +- Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098)) +- Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125)) +- Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752)) +- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0
([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) +- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) +- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126)) +- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.5 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206), [#10299](https://github.com/opensearch-project/OpenSearch/pull/10299)) +- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219)) +- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219)) +- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219)) +- Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) +- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) +- Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301)) +- Bump `org.apache.avro:avro` from 1.11.2 to 1.11.3 ([#10210](https://github.com/opensearch-project/OpenSearch/pull/10210)) +- Bump `netty` from 4.1.99.Final to 4.1.100.Final ([#10564](https://github.com/opensearch-project/OpenSearch/pull/10564)) + +### Changed +- Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) +- Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916)) +- Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042)) +- [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122)) +- Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246)) +- Disable concurrent segment search when terminate_after is used ([#10200](https://github.com/opensearch-project/OpenSearch/pull/10200)) +- Add instrumentation in Inbound Handler. ([#10143](https://github.com/opensearch-project/OpenSearch/pull/10143)) +- Enable remote segment upload backpressure by default ([#10356](https://github.com/opensearch-project/OpenSearch/pull/10356)) +- [Remote Store] Add support to reload repository metadata inplace ([#9569](https://github.com/opensearch-project/OpenSearch/pull/9569)) +- [Metrics Framework] Add Metrics framework.
([#10241](https://github.com/opensearch-project/OpenSearch/pull/10241)) +- Updating the separator for RemoteStoreLockManager since underscore is allowed in base64UUID url charset ([#10379](https://github.com/opensearch-project/OpenSearch/pull/10379)) + +### Removed +- Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447)) + +### Fixed +- Fix ignore_missing parameter having no effect when using a template snippet in the rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725)) +- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045)) +- Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) +- Fix remove ingest processor not handling the ignore_missing parameter correctly ([#10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) +- Fix registration and initialization of multiple extensions ([#10256](https://github.com/opensearch-project/OpenSearch/pull/10256)) +- Fix circular dependency in Settings initialization ([#10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) +- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([#10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index 189215b6562a3..f80c9f9c0bc80 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,4 +1,3 @@ ---- "Help": - skip: version: " - 2.3.99" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml index 1ce8468cb51f9..00ec838489f63 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml @@ -1,3 +1,29 @@ +"Test cat thread_pool total_wait_time output": + - skip: + version: " - 2.10.99" + reason: thread_pool total_wait_time stats were introduced in V_2.11.0 + + - do: + cat.thread_pool: {} + + - match: + $body: | + / #node_name name active queue rejected + ^ (\S+ \s+ \S+ \s+ \d+ \s+ \d+ \s+ \d+ \n)+ $/ + + - do: + cat.thread_pool: + thread_pool_patterns: search,search_throttled,index_searcher,generic + h: name,total_wait_time,twt + v: true + + - match: + $body: | + /^ name \s+ total_wait_time \s+ twt \n + (generic \s+ -1 \s+ -1 \n + search \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n + search_throttled \s+ \d*\.*\d*\D+ \s+ \d*\.*\d*\D+ \n)+ $/ + --- "Test cat thread_pool output": - skip: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml index 1f1f42890355e..784c7b52b18b4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml @@ -138,6 +138,35 @@ - is_false: nodes.$node_id.indices.translog - is_false: nodes.$node_id.indices.recovery +--- +"Metric - indexing doc_status": + - skip: + version: " -
2.10.99" + reason: "Doc Status Stats were introduced in v2.11.0" + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id + + - do: + nodes.stats: { metric: indices, index_metric: indexing } + + - is_false: nodes.$node_id.indices.docs + - is_false: nodes.$node_id.indices.store + - is_true: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.indexing.doc_status + - is_false: nodes.$node_id.indices.get + - is_false: nodes.$node_id.indices.search + - is_false: nodes.$node_id.indices.merges + - is_false: nodes.$node_id.indices.refresh + - is_false: nodes.$node_id.indices.flush + - is_false: nodes.$node_id.indices.warmer + - is_false: nodes.$node_id.indices.query_cache + - is_false: nodes.$node_id.indices.fielddata + - is_false: nodes.$node_id.indices.completion + - is_false: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.indices.translog + - is_false: nodes.$node_id.indices.recovery --- "Metric - recovery": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_keyword_doc_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_keyword_doc_values.yml new file mode 100644 index 0000000000000..8829e7b100fdd --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_keyword_doc_values.yml @@ -0,0 +1,46 @@ +--- +"search on keyword fields with doc_values enabled": + - do: + indices.create: + index: test + body: + mappings: + properties: + "some_keyword": + type: "keyword" + index: true + doc_values: true + + - do: + bulk: + index: test + refresh: true + body: + - '{"index": {"_index": "test", "_id": "1" }}' + - '{ "some_keyword": "ingesting some random keyword data" }' + - '{ "index": { "_index": "test", "_id": "2" }}' + - '{ "some_keyword": "400" }' + - '{ "index": { "_index": "test", "_id": "3" } }' + - '{ "some_keyword": "5" }' + + - do: + search: + index: test + body: + query: + prefix: + some_keyword: "ing" + + - match: { hits.hits.0._source.some_keyword: "ingesting some random keyword data" } + + - do: + search: + index: test + body: + query: + range: { + "some_keyword": { + "lt": 500 + } } + + - match: { hits.total.value: 2 } diff --git a/server/build.gradle b/server/build.gradle index 9c409d77363cb..2f06ece06ca74 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -333,6 +333,11 @@ tasks.named("thirdPartyAudit").configure { 'org.osgi.framework.SynchronousBundleListener', 'org.osgi.framework.wiring.BundleWire', 'org.osgi.framework.wiring.BundleWiring', + 'org.zeromq.SocketType', + 'org.zeromq.ZContext', + 'org.zeromq.ZMonitor', + 'org.zeromq.ZMonitor$Event', + 'org.zeromq.ZMonitor$ZEvent', 'org.zeromq.ZMQ$Context', 'org.zeromq.ZMQ$Socket', 'org.zeromq.ZMQ', @@ -358,7 +363,9 @@ tasks.named("thirdPartyAudit").configure { 'com.google.protobuf.UnsafeUtil$Android32MemoryAccessor', 'com.google.protobuf.UnsafeUtil$Android64MemoryAccessor', 'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', - 'com.google.protobuf.UnsafeUtil$MemoryAccessor' + 'com.google.protobuf.UnsafeUtil$MemoryAccessor', + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil', + 'org.apache.logging.log4j.core.util.internal.UnsafeUtil$1' ) } diff --git a/server/licenses/jna-5.13.0.jar.sha1 b/server/licenses/jna-5.13.0.jar.sha1 new file mode 100644 index 0000000000000..faf2012f0b5c0 --- /dev/null +++ b/server/licenses/jna-5.13.0.jar.sha1 @@ -0,0 +1 @@ +1200e7ebeedbe0d10062093f32925a912020e747 \ No newline at end of file diff --git a/server/licenses/jna-5.5.0.jar.sha1 b/server/licenses/jna-5.5.0.jar.sha1 deleted file 
mode 100644 index 5621dfc743dd0..0000000000000 --- a/server/licenses/jna-5.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0e0845217c4907822403912ad6828d8e0b256208 diff --git a/server/licenses/log4j-api-2.20.0.jar.sha1 b/server/licenses/log4j-api-2.20.0.jar.sha1 deleted file mode 100644 index 37154d9861ac0..0000000000000 --- a/server/licenses/log4j-api-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1fe6082e660daf07c689a89c94dc0f49c26b44bb \ No newline at end of file diff --git a/server/licenses/log4j-api-2.21.0.jar.sha1 b/server/licenses/log4j-api-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..51446052594aa --- /dev/null +++ b/server/licenses/log4j-api-2.21.0.jar.sha1 @@ -0,0 +1 @@ +760192f2b69eacf4a4afc78e5a1d7a8de054fcbd \ No newline at end of file diff --git a/server/licenses/log4j-core-2.20.0.jar.sha1 b/server/licenses/log4j-core-2.20.0.jar.sha1 deleted file mode 100644 index 49c972626563b..0000000000000 --- a/server/licenses/log4j-core-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eb2a9a47b1396e00b5eee1264296729a70565cc0 \ No newline at end of file diff --git a/server/licenses/log4j-core-2.21.0.jar.sha1 b/server/licenses/log4j-core-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..c88e6f7a25ca9 --- /dev/null +++ b/server/licenses/log4j-core-2.21.0.jar.sha1 @@ -0,0 +1 @@ +122e1a9e0603cc9eae07b0846a6ff01f2454bc49 \ No newline at end of file diff --git a/server/licenses/log4j-jul-2.20.0.jar.sha1 b/server/licenses/log4j-jul-2.20.0.jar.sha1 deleted file mode 100644 index a456651e4569e..0000000000000 --- a/server/licenses/log4j-jul-2.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8170e6118eac1ab332046c179718a0f107f688e1 \ No newline at end of file diff --git a/server/licenses/log4j-jul-2.21.0.jar.sha1 b/server/licenses/log4j-jul-2.21.0.jar.sha1 new file mode 100644 index 0000000000000..480010840abca --- /dev/null +++ b/server/licenses/log4j-jul-2.21.0.jar.sha1 @@ -0,0 +1 @@ +f0da61113f4a47654677e6a98b1e13ca7de2483d \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.7.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.7.0.jar.sha1 deleted file mode 100644 index 45d8f459573b1..0000000000000 --- a/server/licenses/lucene-analysis-common-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -27ba6caaa4587a982cd451f7217b5a982bcfc44a \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.8.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..6ad304fa52c12 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.8.0.jar.sha1 @@ -0,0 +1 @@ +36f0363325ca7bf62c180160d1ed5165c7c37795 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.7.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.7.0.jar.sha1 deleted file mode 100644 index 3981ea4fa226e..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6389463bfbfcf902c8d31d12e9513a6818ac9d5e \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..f104c4207d390 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 @@ -0,0 +1 @@ +e98fb408028f40170e6d87c16422bfdc0bb2e392 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.7.0.jar.sha1 b/server/licenses/lucene-core-9.7.0.jar.sha1 deleted file mode 100644 index 2b0f77275c0ab..0000000000000 --- a/server/licenses/lucene-core-9.7.0.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -ad391210ffd806931334be9670a35af00c56f959 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.8.0.jar.sha1 b/server/licenses/lucene-core-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..f9a3e2f3cbee6 --- /dev/null +++ b/server/licenses/lucene-core-9.8.0.jar.sha1 @@ -0,0 +1 @@ +5e8421c5f8573bcf22e9265fc7e19469545a775a \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.7.0.jar.sha1 b/server/licenses/lucene-grouping-9.7.0.jar.sha1 deleted file mode 100644 index 90acbf6dcee8d..0000000000000 --- a/server/licenses/lucene-grouping-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8e6f0c229f4861be641047c33b05067176e4279c \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.8.0.jar.sha1 b/server/licenses/lucene-grouping-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..ab132121b2edc --- /dev/null +++ b/server/licenses/lucene-grouping-9.8.0.jar.sha1 @@ -0,0 +1 @@ +d39184518351178c404ed9669fc6cb6111f2288d \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.7.0.jar.sha1 b/server/licenses/lucene-highlighter-9.7.0.jar.sha1 deleted file mode 100644 index bfcca0bc6cb5b..0000000000000 --- a/server/licenses/lucene-highlighter-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -facb7c7ee0f75ed457a2d98f10d6430e25a53691 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.8.0.jar.sha1 b/server/licenses/lucene-highlighter-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..c7cb678fb7b72 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.8.0.jar.sha1 @@ -0,0 +1 @@ +1ac38c8278dbd63dfab30744a41dd955a415a31c \ No newline at end of file diff --git a/server/licenses/lucene-join-9.7.0.jar.sha1 b/server/licenses/lucene-join-9.7.0.jar.sha1 deleted file mode 100644 index 0dab3a7ddc41a..0000000000000 --- a/server/licenses/lucene-join-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d041bdc0947a14223cf68357407ee18b21027587 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.8.0.jar.sha1 b/server/licenses/lucene-join-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..2b6cb8af4faf6 --- /dev/null +++ b/server/licenses/lucene-join-9.8.0.jar.sha1 @@ -0,0 +1 @@ +3d64fc57bb6e718d906413a9f73c713e6d4d8bb0 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.7.0.jar.sha1 b/server/licenses/lucene-memory-9.7.0.jar.sha1 deleted file mode 100644 index 357a9c4b2ea26..0000000000000 --- a/server/licenses/lucene-memory-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0fade51ee353e15ddbbc45262aafe6f99ed020f1 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.8.0.jar.sha1 b/server/licenses/lucene-memory-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..5fdfee401dd0a --- /dev/null +++ b/server/licenses/lucene-memory-9.8.0.jar.sha1 @@ -0,0 +1 @@ +5283ac71d6ccecb5e00c7b52df2faec012f2625a \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.7.0.jar.sha1 b/server/licenses/lucene-misc-9.7.0.jar.sha1 deleted file mode 100644 index da5e1921626b2..0000000000000 --- a/server/licenses/lucene-misc-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7fcf451e2376526c3a027958812866cc5b0ff13f \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.8.0.jar.sha1 b/server/licenses/lucene-misc-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..cf815cba15862 --- /dev/null +++ b/server/licenses/lucene-misc-9.8.0.jar.sha1 @@ -0,0 +1 @@ +9a57b049cf51a5e9c9c1909c420f645f1b6f9a54 \ No newline at end of file diff --git 
a/server/licenses/lucene-queries-9.7.0.jar.sha1 b/server/licenses/lucene-queries-9.7.0.jar.sha1 deleted file mode 100644 index fa82e95a7e19f..0000000000000 --- a/server/licenses/lucene-queries-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -126989d4622419aa06fcbf3a342e859cab8c8799 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.8.0.jar.sha1 b/server/licenses/lucene-queries-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..09f369ef18e12 --- /dev/null +++ b/server/licenses/lucene-queries-9.8.0.jar.sha1 @@ -0,0 +1 @@ +628db4ef46f1c6a05145bdac1d1bc4ace6341b13 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.7.0.jar.sha1 b/server/licenses/lucene-queryparser-9.7.0.jar.sha1 deleted file mode 100644 index 438db0aea66e1..0000000000000 --- a/server/licenses/lucene-queryparser-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6e77bde908ff698354e4a2149e6dd4658b56d7b0 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.8.0.jar.sha1 b/server/licenses/lucene-queryparser-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..2a42a8956b18b --- /dev/null +++ b/server/licenses/lucene-queryparser-9.8.0.jar.sha1 @@ -0,0 +1 @@ +982faf2bfa55542bf57fbadef54c19ac00f57cae \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.7.0.jar.sha1 b/server/licenses/lucene-sandbox-9.7.0.jar.sha1 deleted file mode 100644 index 38b0b1cccbc29..0000000000000 --- a/server/licenses/lucene-sandbox-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9f3e8e1947f2f1c5784132444af51a060ff0b4bf \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.8.0.jar.sha1 b/server/licenses/lucene-sandbox-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..64a0b07f72d29 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.8.0.jar.sha1 @@ -0,0 +1 @@ +06493dbd14d02537716822254866a94458f4d842 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.7.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.7.0.jar.sha1 deleted file mode 100644 index 48679df469fd1..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -01b0bc7a407d8c35a70a1adf7966bb3e7caae928 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..d1bcb0581435c --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 @@ -0,0 +1 @@ +9d9a731822ad6eefa1ba288a0c158d478522f165 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.7.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.7.0.jar.sha1 deleted file mode 100644 index 55d4d217fa6b9..0000000000000 --- a/server/licenses/lucene-spatial3d-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7c6b1b6e0a70c9cd177371e648648c2f896742a2 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.8.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..d17459cc569a9 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.8.0.jar.sha1 @@ -0,0 +1 @@ +ce752a52b2d4eac90633c7df7982e29504f99e76 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.7.0.jar.sha1 b/server/licenses/lucene-suggest-9.7.0.jar.sha1 deleted file mode 100644 index d4d7e6cd6bed9..0000000000000 --- a/server/licenses/lucene-suggest-9.7.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5c37fd9a5d71dc87fe1cd4c18ff295ec8cfac170 \ No newline at end of file diff --git 
a/server/licenses/lucene-suggest-9.8.0.jar.sha1 b/server/licenses/lucene-suggest-9.8.0.jar.sha1 new file mode 100644 index 0000000000000..ff47b87672d2c --- /dev/null +++ b/server/licenses/lucene-suggest-9.8.0.jar.sha1 @@ -0,0 +1 @@ +f977f96f2093b7fddea6b67caa2e1c5b10edebf6 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java index 6343bd127c458..4c9f49df71257 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java @@ -60,8 +60,8 @@ public class HotThreadsIT extends OpenSearchIntegTestCase { public void testHotThreadsDontFail() throws ExecutionException, InterruptedException { - /** - * This test just checks if nothing crashes or gets stuck etc. + /* + This test just checks if nothing crashes or gets stuck etc. */ createIndex("test"); final int iters = scaledRandomIntBetween(2, 20); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java index 0197ccf059737..44ba585016d8e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java @@ -112,7 +112,7 @@ protected int numberOfEvents(String actionMasks, Function findEvents(String actionMasks, Function, Boolean> criteria) { List events = new ArrayList<>(); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index f11c696310b39..c4dcedcc722cf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -31,6 +31,8 @@ package org.opensearch.action.admin.cluster.node.tasks; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.ResourceNotFoundException; import org.opensearch.action.ActionRequest; @@ -49,6 +51,8 @@ import org.opensearch.common.SetOnce; import org.opensearch.common.action.ActionFuture; import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.set.Sets; @@ -65,7 +69,7 @@ import org.opensearch.tasks.TaskInfo; import org.opensearch.tasks.TaskManager; import org.opensearch.test.InternalTestCluster; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportResponseHandler; @@ -74,6 +78,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -86,6 +91,7 
@@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; @@ -93,7 +99,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -public class CancellableTasksIT extends OpenSearchIntegTestCase { +public class CancellableTasksIT extends ParameterizedOpenSearchIntegTestCase { static int idGenerator = 0; static final Map beforeSendLatches = ConcurrentCollections.newConcurrentMap(); @@ -101,6 +107,23 @@ public class CancellableTasksIT extends OpenSearchIntegTestCase { static final Map beforeExecuteLatches = ConcurrentCollections.newConcurrentMap(); static final Map completedLatches = ConcurrentCollections.newConcurrentMap(); + public CancellableTasksIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Before public void resetTestStates() { idGenerator = 0;
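The CancellableTasksIT hunk above converts the suite to ParameterizedOpenSearchIntegTestCase so that every test runs twice, once with concurrent segment search disabled and once with it enabled. A condensed sketch of the pattern, with a hypothetical `MyFeatureIT` standing in for a real suite (base class, setting, and feature flag names are taken from the diff):

```java
public class MyFeatureIT extends ParameterizedOpenSearchIntegTestCase {
    public MyFeatureIT(Settings dynamicSettings) {
        super(dynamicSettings); // the base class applies these settings to the test cluster per run
    }

    @ParametersFactory
    public static Collection<Object[]> parameters() {
        // one suite run per Settings object: concurrent segment search off, then on
        return Arrays.asList(
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
        );
    }

    @Override
    protected Settings featureFlagSettings() {
        // the cluster setting only takes effect while the feature flag is enabled
        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
    }
}
```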
<p>
      * The way the test framework bootstraps the test cluster makes it difficult to parameterize the feature flag. * Once concurrent search is moved behind a cluster setting we can parameterize these tests behind the setting. */ @@ -72,7 +72,7 @@ protected Settings featureFlagSettings() { /** * Tests the number of threads that worked on a search task. - * + *
<p>
      * Currently, we try to control concurrency by creating an index with 7 segments and rely on * the way concurrent search creates leaf slices from segments. Once more concurrency controls are introduced * we should improve this test to use those methods. @@ -108,8 +108,9 @@ public void testConcurrentSearchTaskTracking() { assertEquals(mainTaskInfo.getTaskId(), taskInfo.getParentTaskId()); Map> threadStats = getThreadStats(SearchAction.NAME + "[*]", taskInfo.getTaskId()); - // Concurrent search forks each slice of 5 segments to different thread - assertEquals((int) Math.ceil(getSegmentCount(INDEX_NAME) / 5.0), threadStats.size()); + // Concurrent search forks each slice of 5 segments to different thread (see please + // https://github.com/apache/lucene/issues/12498) + assertEquals((int) Math.ceil(getSegmentCount(INDEX_NAME) / 5.0) + 1, threadStats.size()); // assert that all task descriptions have non-zero length MatcherAssert.assertThat(taskInfo.getDescription().length(), greaterThan(0)); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java index aff7c5d9876ac..36fe3748e9d10 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java @@ -46,7 +46,7 @@ /** * This class tests that repository operations (Put, Delete, Verify) are blocked when the cluster is read-only. - * + *
<p>
      * The @NodeScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". */ @ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index 347011721c728..78fb01b07b6b1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -53,7 +53,7 @@ /** * This class tests that snapshot operations (Create, Delete, Restore) are blocked when the cluster is read-only. - * + *
<p>
      * The @NodeScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". */ @ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java new file mode 100644 index 0000000000000..a081110e6c5a1 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.create; + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +import org.opensearch.Version; +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.admin.indices.shrink.ResizeType; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.VersionUtils; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; + +public class RemoteCloneIndexIT extends RemoteStoreBaseIntegTestCase { + + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public void testCreateCloneIndex() { + Version version = VersionUtils.randomIndexCompatibleVersion(random()); + int numPrimaryShards = randomIntBetween(1, 5); + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", numPrimaryShards).put("index.version.created", version) + ).get(); + final int docs = randomIntBetween(0, 128); + for (int i = 0; i < docs; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + internalCluster().ensureAtLeastNumDataNodes(2); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + + final IndicesStatsResponse sourceStats = client().admin().indices().prepareStats("source").setSegments(true).get(); + + // disable rebalancing to be able to capture the right stats. balancing can move the target primary + // making it hard to pin point the source shards. + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")) + .get(); + try { + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setResizeType(ResizeType.CLONE) + .setSettings(Settings.builder().put("index.number_of_replicas", 0).putNull("index.blocks.write").build()) + .get() + ); + ensureGreen(); + + final IndicesStatsResponse targetStats = client().admin().indices().prepareStats("target").get(); + assertThat(targetStats.getIndex("target").getIndexShards().keySet().size(), equalTo(numPrimaryShards)); + + final int size = docs > 0 ? 
2 * docs : 1; + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + + for (int i = docs; i < 2 * docs; i++) { + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + flushAndRefresh(); + assertHitCount( + client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + 2 * docs + ); + assertHitCount(client().prepareSearch("source").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null)); + } finally { + // clean up + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null) + ) + .get(); + } + + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java new file mode 100644 index 0000000000000..282eb9c6ad95e --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteShrinkIndexIT.java @@ -0,0 +1,545 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.create; + +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedSetSelector; +import org.apache.lucene.search.SortedSetSortField; +import org.apache.lucene.util.Constants; +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.reroute.ClusterRerouteResponse; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.support.ActiveShardCount; +import org.opensearch.client.Client; +import org.opensearch.cluster.ClusterInfoService; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.InternalClusterInfoService; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.Murmur3HashFunction; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; +import 
org.opensearch.index.engine.SegmentsStats; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.index.seqno.SeqNoStats; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.VersionUtils; + +import java.util.Arrays; +import java.util.Map; +import java.util.stream.IntStream; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +public class RemoteShrinkIndexIT extends RemoteStoreBaseIntegTestCase { + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + } + + public void testCreateShrinkIndexToN() { + + assumeFalse("https://github.com/elastic/elasticsearch/issues/34080", Constants.WINDOWS); + + int[][] possibleShardSplits = new int[][] { { 8, 4, 2 }, { 9, 3, 1 }, { 4, 2, 1 }, { 15, 5, 1 } }; + int[] shardSplits = randomFrom(possibleShardSplits); + assertEquals(shardSplits[0], (shardSplits[0] / shardSplits[1]) * shardSplits[1]); + assertEquals(shardSplits[1], (shardSplits[1] / shardSplits[2]) * shardSplits[2]); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", shardSplits[0])).get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source") + .setId(Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON) + .get(); + } + final Map dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + String mergeNode = discoveryNodes[0].getName(); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. 
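The update-settings call that follows implements the comment above: a shrink requires every shard copy of the source index on a single node, and the index write-blocked. A minimal sketch of that preparation step, mirroring the prepareUpdateSettings calls used throughout these tests (the helper name is illustrative, not part of this change):

    // Sketch only: pin all shard copies to the merge node and freeze writes,
    // the two source-index settings every shrink in this file sets first.
    private void prepareForShrink(String index, String mergeNode) {
        client().admin()
            .indices()
            .prepareUpdateSettings(index)
            .setSettings(
                Settings.builder()
                    .put("index.routing.allocation.require._name", mergeNode) // co-locate shards
                    .put("index.blocks.write", true) // source must be read-only while resizing
            )
            .get();
    }
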
+ client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true)) + .get(); + ensureGreen(); + // now merge source into a 4 shard index + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "first_shrink") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", shardSplits[1]) + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + + for (int i = 0; i < 20; i++) { // now update + client().prepareIndex("first_shrink") + .setId(Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON) + .get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + + // relocate all shards to one node such that we can merge it. + client().admin() + .indices() + .prepareUpdateSettings("first_shrink") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true)) + .get(); + ensureGreen(); + // now merge source into a 2 shard index + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("first_shrink", "second_shrink") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", shardSplits[2]) + .putNull("index.blocks.write") + .putNull("index.routing.allocation.require._name") + .build() + ) + .get() + ); + ensureGreen(); + assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + // let it be allocated anywhere and bump replicas + client().admin() + .indices() + .prepareUpdateSettings("second_shrink") + .setSettings(Settings.builder().putNull("index.routing.allocation.include._id").put("index.number_of_replicas", 0)) + .get(); + ensureGreen(); + assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + + for (int i = 0; i < 20; i++) { // now update + client().prepareIndex("second_shrink") + .setId(Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON) + .get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("second_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + assertHitCount(client().prepareSearch("first_shrink").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + } + + public void testShrinkIndexPrimaryTerm() throws Exception { + int numberOfShards = randomIntBetween(2, 20); + int numberOfTargetShards = randomValueOtherThanMany(n -> numberOfShards % n != 0, () -> randomIntBetween(1, numberOfShards - 1)); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", numberOfShards)).get(); + + final Map dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + 
assertThat(dataNodes.size(), greaterThanOrEqualTo(2)); + final DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + final String mergeNode = discoveryNodes[0].getName(); + // This needs more than the default timeout if a large number of shards were created. + ensureGreen(TimeValue.timeValueSeconds(120)); + + // fail random primary shards to force primary terms to increase + final Index source = resolveIndex("source"); + final int iterations = scaledRandomIntBetween(0, 16); + for (int i = 0; i < iterations; i++) { + final String node = randomSubsetOf(1, internalCluster().nodesInclude("source")).get(0); + final IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); + final IndexService indexShards = indexServices.indexServiceSafe(source); + for (final Integer shardId : indexShards.shardIds()) { + final IndexShard shard = indexShards.getShard(shardId); + if (shard.routingEntry().primary() && randomBoolean()) { + disableAllocation("source"); + shard.failShard("test", new Exception("test")); + // this can not succeed until the shard is failed and a replica is promoted + int id = 0; + while (true) { + // find an ID that routes to the right shard, we will only index to the shard that saw a primary failure + final String s = Integer.toString(id); + final int hash = Math.floorMod(Murmur3HashFunction.hash(s), numberOfShards); + if (hash == shardId) { + final IndexRequest request = new IndexRequest("source").id(s) + .source("{ \"f\": \"" + s + "\"}", MediaTypeRegistry.JSON); + client().index(request).get(); + break; + } else { + id++; + } + } + enableAllocation("source"); + ensureGreen(); + } + } + } + + // relocate all shards to one node such that we can merge it. + final Settings.Builder prepareShrinkSettings = Settings.builder() + .put("index.routing.allocation.require._name", mergeNode) + .put("index.blocks.write", true); + client().admin().indices().prepareUpdateSettings("source").setSettings(prepareShrinkSettings).get(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to relocate many shards + + final IndexMetadata indexMetadata = indexMetadata(client(), "source"); + final long beforeShrinkPrimaryTerm = IntStream.range(0, numberOfShards).mapToLong(indexMetadata::primaryTerm).max().getAsLong(); + + // now merge source into target + final Settings shrinkSettings = Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", numberOfTargetShards) + .build(); + assertAcked(client().admin().indices().prepareResizeIndex("source", "target").setSettings(shrinkSettings).get()); + + ensureGreen(TimeValue.timeValueSeconds(120)); + + final IndexMetadata afterShrinkIndexMetadata = indexMetadata(client(), "target"); + for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { + assertThat(afterShrinkIndexMetadata.primaryTerm(shardId), equalTo(beforeShrinkPrimaryTerm + 1)); + } + } + + private static IndexMetadata indexMetadata(final Client client, final String index) { + final ClusterStateResponse clusterStateResponse = client.admin().cluster().state(new ClusterStateRequest()).actionGet(); + return clusterStateResponse.getState().metadata().index(index); + } + + public void testCreateShrinkIndex() { + internalCluster().ensureAtLeastNumDataNodes(2); + Version version = VersionUtils.randomVersion(random()); + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7)).put("index.version.created", version) + 
).get(); + final int docs = randomIntBetween(0, 128); + for (int i = 0; i < docs; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + final Map dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings( + Settings.builder() + .put("index.routing.allocation.require._name", discoveryNodes[0].getName()) + .put("index.blocks.write", true) + ) + .get(); + ensureGreen(); + + final IndicesStatsResponse sourceStats = client().admin().indices().prepareStats("source").setSegments(true).get(); + + // disable rebalancing to be able to capture the right stats. balancing can move the target primary + // making it hard to pin point the source shards. + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")) + .get(); + + // now merge source into a single shard index + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .putNull("index.blocks.write") + .putNull("index.routing.allocation.require._name") + .build() + ) + .get() + ); + ensureGreen(); + + // resolve true merge node - this is not always the node we required as all shards may be on another node + final ClusterState state = client().admin().cluster().prepareState().get().getState(); + DiscoveryNode mergeNode = state.nodes().get(state.getRoutingTable().index("target").shard(0).primaryShard().currentNodeId()); + logger.info("merge node {}", mergeNode); + + final long maxSeqNo = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getSeqNoStats) + .mapToLong(SeqNoStats::getMaxSeqNo) + .max() + .getAsLong(); + final long maxUnsafeAutoIdTimestamp = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getStats) + .map(CommonStats::getSegments) + .mapToLong(SegmentsStats::getMaxUnsafeAutoIdTimestamp) + .max() + .getAsLong(); + + final IndicesStatsResponse targetStats = client().admin().indices().prepareStats("target").get(); + for (final ShardStats shardStats : targetStats.getShards()) { + final SeqNoStats seqNoStats = shardStats.getSeqNoStats(); + final ShardRouting shardRouting = shardStats.getShardRouting(); + assertThat("failed on " + shardRouting, seqNoStats.getMaxSeqNo(), equalTo(maxSeqNo)); + assertThat("failed on " + shardRouting, seqNoStats.getLocalCheckpoint(), equalTo(maxSeqNo)); + assertThat( + "failed on " + shardRouting, + shardStats.getStats().getSegments().getMaxUnsafeAutoIdTimestamp(), + equalTo(maxUnsafeAutoIdTimestamp) + ); + } + + final int size = docs > 0 ? 
2 * docs : 1; + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + + for (int i = docs; i < 2 * docs; i++) { + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 2 * docs); + assertHitCount(client().prepareSearch("source").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null)); + + // clean up + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null) + ) + .get(); + } + + /** + * Tests that we can manually recover from a failed allocation due to shards being moved away etc. + */ + public void testCreateShrinkIndexFails() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7)).put("number_of_replicas", 0) + ).get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + final Map dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + String spareNode = discoveryNodes[0].getName(); + String mergeNode = discoveryNodes[1].getName(); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true)) + .get(); + ensureGreen(); + + // now merge source into a single shard index + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setWaitForActiveShards(ActiveShardCount.NONE) + .setSettings( + Settings.builder() + .put("index.routing.allocation.exclude._name", mergeNode) // we manually exclude the merge node to forcefully fuck it up + .put("index.number_of_replicas", 0) + .put("index.allocation.max_retries", 1) + .build() + ) + .get(); + client().admin().cluster().prepareHealth("target").setWaitForEvents(Priority.LANGUID).get(); + + // now we move all shards away from the merge node + client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", spareNode).put("index.blocks.write", true)) + .get(); + ensureGreen("source"); + + client().admin() + .indices() + .prepareUpdateSettings("target") // erase the forcefully fuckup! 
+ .setSettings(Settings.builder().putNull("index.routing.allocation.exclude._name")) + .get(); + // wait until it fails + assertBusy(() -> { + ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); + RoutingTable routingTables = clusterStateResponse.getState().routingTable(); + assertTrue(routingTables.index("target").shard(0).getShards().get(0).unassigned()); + assertEquals( + UnassignedInfo.Reason.ALLOCATION_FAILED, + routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getReason() + ); + assertEquals(1, routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getNumFailedAllocations()); + }); + client().admin() + .indices() + .prepareUpdateSettings("source") // now relocate them all to the right node + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode)) + .get(); + ensureGreen("source"); + + final InternalClusterInfoService infoService = (InternalClusterInfoService) internalCluster().getInstance( + ClusterInfoService.class, + internalCluster().getClusterManagerName() + ); + infoService.refresh(); + // kick off a retry and wait until it's done! + ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + long expectedShardSize = clusterRerouteResponse.getState() + .routingTable() + .index("target") + .shard(0) + .getShards() + .get(0) + .getExpectedShardSize(); + // we support the expected shard size in the allocator to sum up over the source index shards + assertTrue("expected shard size must be set but wasn't: " + expectedShardSize, expectedShardSize > 0); + ensureGreen(); + assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20); + } + + public void testCreateShrinkWithIndexSort() throws Exception { + SortField expectedSortField = new SortedSetSortField("id", true, SortedSetSelector.Type.MAX); + expectedSortField.setMissingValue(SortedSetSortField.STRING_FIRST); + Sort expectedIndexSort = new Sort(expectedSortField); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings( + Settings.builder() + .put(indexSettings()) + .put("sort.field", "id") + .put("sort.order", "desc") + .put("number_of_shards", 8) + .put("number_of_replicas", 0) + ).setMapping("id", "type=keyword,doc_values=true").get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source") + .setId(Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", MediaTypeRegistry.JSON) + .get(); + } + final Map dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + String mergeNode = discoveryNodes[0].getName(); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + + flushAndRefresh(); + assertSortedSegments("source", expectedIndexSort); + + // relocate all shards to one node such that we can merge it. 
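The two resize attempts further down pin down the index-sort contract: sort settings are fixed at index creation, so the target inherits index.sort.* from the source and any attempt to set them on the resize request is rejected with "can't override index sort when resizing an index". A hedged sketch of what that validation amounts to (illustrative only; the real check lives in the server's create-index path, not in this test):

    // Illustrative restatement of the rule the expectThrows below exercises:
    // resize requests may not carry index.sort.* settings.
    static void validateNoSortOverride(Settings requestSettings) {
        if (requestSettings.getByPrefix("index.sort.").isEmpty() == false) {
            throw new IllegalArgumentException("can't override index sort when resizing an index");
        }
    }
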
+ client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true)) + .get(); + ensureGreen(); + + // check that index sort cannot be set on the target index + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", "2") + .put("index.sort.field", "foo") + .build() + ) + .get() + ); + assertThat(exc.getMessage(), containsString("can't override index sort when resizing an index")); + + // check that the index sort order of `source` is correctly applied to the `target` + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", "2") + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + flushAndRefresh(); + GetSettingsResponse settingsResponse = client().admin().indices().prepareGetSettings("target").execute().actionGet(); + assertEquals(settingsResponse.getSetting("target", "index.sort.field"), "id"); + assertEquals(settingsResponse.getSetting("target", "index.sort.order"), "desc"); + assertSortedSegments("target", expectedIndexSort); + + // ... and that the index sort is also applied to updates + for (int i = 20; i < 40; i++) { + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + flushAndRefresh(); + assertSortedSegments("target", expectedIndexSort); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java new file mode 100644 index 0000000000000..dd4252d24f314 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteSplitIndexIT.java @@ -0,0 +1,506 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.action.admin.indices.create; + +import org.apache.lucene.search.join.ScoreMode; +import org.apache.lucene.util.Constants; +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.admin.indices.shrink.ResizeType; +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.get.GetResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.MetadataCreateIndexService; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.Murmur3HashFunction; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; +import org.opensearch.index.engine.SegmentsStats; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.index.seqno.SeqNoStats; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.test.VersionUtils; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.stream.IntStream; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.query.QueryBuilders.nestedQuery; +import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; + +public class RemoteSplitIndexIT extends RemoteStoreBaseIntegTestCase { + + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + } + + public void testCreateSplitIndexToN() throws IOException { + int[][] possibleShardSplits = new int[][] { { 2, 4, 8 }, { 3, 6, 12 }, { 1, 2, 4 } }; + int[] shardSplits = randomFrom(possibleShardSplits); + splitToN(shardSplits[0], shardSplits[1], shardSplits[2]); + } 
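The factor triples fed to splitToN above ({2, 4, 8}, {3, 6, 12}, {1, 2, 4}) grow by whole-number multiples, which is what makes each chained split legal: a split target must be an integer multiple of its source and must still partition the index's fixed number_of_routing_shards evenly. A hedged summary of those constraints (our restatement, not code from this change):

    // Why {2, 4, 8}-style chains are valid: each step multiplies the shard count
    // by a whole number, and every stage must evenly divide the routing shards
    // fixed at creation time (index.number_of_routing_shards).
    static boolean canSplit(int fromShards, int toShards, int routingShards) {
        return toShards % fromShards == 0      // target is a multiple of the source
            && routingShards % toShards == 0;  // target still partitions routing shards
    }

For example, with routingShards = 8, canSplit(2, 4, 8) and canSplit(4, 8, 8) both hold, matching the first triple.
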
+ + public void testSplitFromOneToN() { + + assumeFalse("https://github.com/elastic/elasticsearch/issues/34080", Constants.WINDOWS); + + splitToN(1, 5, 10); + client().admin().indices().prepareDelete("*").get(); + int randomSplit = randomIntBetween(2, 6); + splitToN(1, randomSplit, randomSplit * 2); + } + + private void splitToN(int sourceShards, int firstSplitShards, int secondSplitShards) { + + assertEquals(sourceShards, (sourceShards * firstSplitShards) / firstSplitShards); + assertEquals(firstSplitShards, (firstSplitShards * secondSplitShards) / secondSplitShards); + internalCluster().ensureAtLeastNumDataNodes(2); + final boolean useRouting = randomBoolean(); + final boolean useNested = randomBoolean(); + final boolean useMixedRouting = useRouting ? randomBoolean() : false; + CreateIndexRequestBuilder createInitialIndex = prepareCreate("source"); + Settings.Builder settings = Settings.builder().put(indexSettings()).put("number_of_shards", sourceShards); + final boolean useRoutingPartition; + if (randomBoolean()) { + // randomly set the value manually + int routingShards = secondSplitShards * randomIntBetween(1, 10); + settings.put("index.number_of_routing_shards", routingShards); + useRoutingPartition = false; + } else { + useRoutingPartition = randomBoolean(); + } + if (useRouting && useMixedRouting == false && useRoutingPartition) { + int numRoutingShards = MetadataCreateIndexService.calculateNumRoutingShards(secondSplitShards, Version.CURRENT) - 1; + settings.put("index.routing_partition_size", randomIntBetween(1, numRoutingShards)); + if (useNested) { + createInitialIndex.setMapping("_routing", "required=true", "nested1", "type=nested"); + } else { + createInitialIndex.setMapping("_routing", "required=true"); + } + } else if (useNested) { + createInitialIndex.setMapping("nested1", "type=nested"); + } + logger.info("use routing {} use mixed routing {} use nested {}", useRouting, useMixedRouting, useNested); + createInitialIndex.setSettings(settings).get(); + + int numDocs = randomIntBetween(10, 50); + String[] routingValue = new String[numDocs]; + + BiFunction indexFunc = (index, id) -> { + try { + return client().prepareIndex(index) + .setId(Integer.toString(id)) + .setSource( + jsonBuilder().startObject() + .field("foo", "bar") + .field("i", id) + .startArray("nested1") + .startObject() + .field("n_field1", "n_value1_1") + .field("n_field2", "n_value2_1") + .endObject() + .startObject() + .field("n_field1", "n_value1_2") + .field("n_field2", "n_value2_2") + .endObject() + .endArray() + .endObject() + ); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }; + for (int i = 0; i < numDocs; i++) { + IndexRequestBuilder builder = indexFunc.apply("source", i); + if (useRouting) { + String routing = randomRealisticUnicodeOfCodepointLengthBetween(1, 10); + if (useMixedRouting && randomBoolean()) { + routingValue[i] = null; + } else { + routingValue[i] = routing; + } + builder.setRouting(routingValue[i]); + } + builder.get(); + } + + if (randomBoolean()) { + for (int i = 0; i < numDocs; i++) { // let's introduce some updates / deletes on the index + if (randomBoolean()) { + IndexRequestBuilder builder = indexFunc.apply("source", i); + if (useRouting) { + builder.setRouting(routingValue[i]); + } + builder.get(); + } + } + } + + ensureYellow(); + client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + Settings.Builder firstSplitSettingsBuilder = Settings.builder() + 
.put("index.number_of_replicas", 0) + .put("index.number_of_shards", firstSplitShards) + .putNull("index.blocks.write"); + if (sourceShards == 1 && useRoutingPartition == false && randomBoolean()) { // try to set it if we have a source index with 1 shard + firstSplitSettingsBuilder.put("index.number_of_routing_shards", secondSplitShards); + } + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "first_split") + .setResizeType(ResizeType.SPLIT) + .setSettings(firstSplitSettingsBuilder.build()) + .get() + ); + ensureGreen(); + assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + + for (int i = 0; i < numDocs; i++) { // now update + IndexRequestBuilder builder = indexFunc.apply("first_split", i); + if (useRouting) { + builder.setRouting(routingValue[i]); + } + builder.get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + for (int i = 0; i < numDocs; i++) { + GetResponse getResponse = client().prepareGet("first_split", Integer.toString(i)).setRouting(routingValue[i]).get(); + assertTrue(getResponse.isExists()); + } + + client().admin() + .indices() + .prepareUpdateSettings("first_split") + .setSettings(Settings.builder().put("index.blocks.write", true)) + .get(); + ensureGreen(); + // now split source into a new index + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("first_split", "second_split") + .setResizeType(ResizeType.SPLIT) + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", secondSplitShards) + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + // let it be allocated anywhere and bump replicas + client().admin() + .indices() + .prepareUpdateSettings("second_split") + .setSettings(Settings.builder().put("index.number_of_replicas", 0)) + .get(); + ensureGreen(); + assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + + for (int i = 0; i < numDocs; i++) { // now update + IndexRequestBuilder builder = indexFunc.apply("second_split", i); + if (useRouting) { + builder.setRouting(routingValue[i]); + } + builder.get(); + } + flushAndRefresh(); + for (int i = 0; i < numDocs; i++) { + GetResponse getResponse = client().prepareGet("second_split", Integer.toString(i)).setRouting(routingValue[i]).get(); + assertTrue(getResponse.isExists()); + } + assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + if (useNested) { + assertNested("source", numDocs); + assertNested("first_split", numDocs); + assertNested("second_split", numDocs); + } + assertAllUniqueDocs( + client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + numDocs + ); + assertAllUniqueDocs( + 
client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + numDocs + ); + assertAllUniqueDocs(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + } + + public void assertNested(String index, int numDocs) { + // now, do a nested query + SearchResponse searchResponse = client().prepareSearch(index) + .setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1_1"), ScoreMode.Avg)) + .get(); + assertNoFailures(searchResponse); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numDocs)); + } + + public void assertAllUniqueDocs(SearchResponse response, int numDocs) { + Set ids = new HashSet<>(); + for (int i = 0; i < response.getHits().getHits().length; i++) { + String id = response.getHits().getHits()[i].getId(); + assertTrue("found ID " + id + " more than once", ids.add(id)); + } + assertEquals(numDocs, ids.size()); + } + + public void testSplitIndexPrimaryTerm() throws Exception { + int numberOfTargetShards = randomIntBetween(2, 20); + int numberOfShards = randomValueOtherThanMany(n -> numberOfTargetShards % n != 0, () -> between(1, numberOfTargetShards - 1)); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings( + Settings.builder() + .put(indexSettings()) + .put("number_of_shards", numberOfShards) + .put("index.number_of_routing_shards", numberOfTargetShards) + ).get(); + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to allocate many shards + + // fail random primary shards to force primary terms to increase + final Index source = resolveIndex("source"); + final int iterations = scaledRandomIntBetween(0, 16); + for (int i = 0; i < iterations; i++) { + final String node = randomSubsetOf(1, internalCluster().nodesInclude("source")).get(0); + final IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); + final IndexService indexShards = indexServices.indexServiceSafe(source); + for (final Integer shardId : indexShards.shardIds()) { + final IndexShard shard = indexShards.getShard(shardId); + if (shard.routingEntry().primary() && randomBoolean()) { + disableAllocation("source"); + shard.failShard("test", new Exception("test")); + // this can not succeed until the shard is failed and a replica is promoted + int id = 0; + while (true) { + // find an ID that routes to the right shard, we will only index to the shard that saw a primary failure + final String s = Integer.toString(id); + final int hash = Math.floorMod(Murmur3HashFunction.hash(s), numberOfShards); + if (hash == shardId) { + final IndexRequest request = new IndexRequest("source").id(s) + .source("{ \"f\": \"" + s + "\"}", MediaTypeRegistry.JSON); + client().index(request).get(); + break; + } else { + id++; + } + } + enableAllocation("source"); + ensureGreen(); + } + } + } + + final Settings.Builder prepareSplitSettings = Settings.builder().put("index.blocks.write", true); + client().admin().indices().prepareUpdateSettings("source").setSettings(prepareSplitSettings).get(); + ensureYellow(); + + final IndexMetadata indexMetadata = indexMetadata(client(), "source"); + final long beforeSplitPrimaryTerm = IntStream.range(0, numberOfShards).mapToLong(indexMetadata::primaryTerm).max().getAsLong(); + + // now split source into target + final Settings splitSettings = Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", numberOfTargetShards) + 
.putNull("index.blocks.write") + .build(); + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setResizeType(ResizeType.SPLIT) + .setSettings(splitSettings) + .get() + ); + + ensureGreen(TimeValue.timeValueSeconds(120)); // needs more than the default to relocate many shards + + final IndexMetadata aftersplitIndexMetadata = indexMetadata(client(), "target"); + for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { + assertThat(aftersplitIndexMetadata.primaryTerm(shardId), equalTo(beforeSplitPrimaryTerm + 1)); + } + } + + private static IndexMetadata indexMetadata(final Client client, final String index) { + final ClusterStateResponse clusterStateResponse = client.admin().cluster().state(new ClusterStateRequest()).actionGet(); + return clusterStateResponse.getState().metadata().index(index); + } + + public void testCreateSplitIndex() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(2); + Version version = VersionUtils.randomIndexCompatibleVersion(random()); + prepareCreate("source").setSettings( + Settings.builder().put(indexSettings()).put("number_of_shards", 1).put("index.version.created", version) + ).get(); + final int docs = randomIntBetween(0, 128); + for (int i = 0; i < docs; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + + final IndicesStatsResponse sourceStats = client().admin().indices().prepareStats("source").setSegments(true).get(); + + // disable rebalancing to be able to capture the right stats. balancing can move the target primary + // making it hard to pin point the source shards. 
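The call that follows disables rebalancing, and the transient setting is cleared again once the assertions are done; this brackets the stats capture so the target primary cannot be moved mid-assertion. The same pair of setTransientSettings calls appears in each resize test in this section; a compact sketch of the bracket (the helper name is ours, not the PR's):

    // Sketch: run an action with shard rebalancing disabled, then restore the default.
    private void withRebalancingDisabled(Runnable action) {
        final String key = EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey();
        client().admin().cluster().prepareUpdateSettings()
            .setTransientSettings(Settings.builder().put(key, "none"))
            .get();
        try {
            action.run();
        } finally {
            client().admin().cluster().prepareUpdateSettings()
                .setTransientSettings(Settings.builder().put(key, (String) null))
                .get();
        }
    }
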
+ client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")) + .get(); + try { + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setResizeType(ResizeType.SPLIT) + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", 2) + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + + final ClusterState state = client().admin().cluster().prepareState().get().getState(); + DiscoveryNode mergeNode = state.nodes().get(state.getRoutingTable().index("target").shard(0).primaryShard().currentNodeId()); + logger.info("split node {}", mergeNode); + + final long maxSeqNo = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getSeqNoStats) + .mapToLong(SeqNoStats::getMaxSeqNo) + .max() + .getAsLong(); + final long maxUnsafeAutoIdTimestamp = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getStats) + .map(CommonStats::getSegments) + .mapToLong(SegmentsStats::getMaxUnsafeAutoIdTimestamp) + .max() + .getAsLong(); + + final IndicesStatsResponse targetStats = client().admin().indices().prepareStats("target").get(); + for (final ShardStats shardStats : targetStats.getShards()) { + final SeqNoStats seqNoStats = shardStats.getSeqNoStats(); + final ShardRouting shardRouting = shardStats.getShardRouting(); + assertThat("failed on " + shardRouting, seqNoStats.getMaxSeqNo(), equalTo(maxSeqNo)); + assertThat("failed on " + shardRouting, seqNoStats.getLocalCheckpoint(), equalTo(maxSeqNo)); + assertThat( + "failed on " + shardRouting, + shardStats.getStats().getSegments().getMaxUnsafeAutoIdTimestamp(), + equalTo(maxUnsafeAutoIdTimestamp) + ); + } + + final int size = docs > 0 ? 
2 * docs : 1; + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + + for (int i = docs; i < 2 * docs; i++) { + client().prepareIndex("target").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + flushAndRefresh(); + assertHitCount( + client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + 2 * docs + ); + assertHitCount(client().prepareSearch("source").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null)); + } finally { + // clean up + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null) + ) + .get(); + } + + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java index 50ff76c6b62f3..82ab5b0118c0e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java @@ -37,6 +37,7 @@ public AcknowledgedResponse createDataStream(String name) throws Exception { CreateDataStreamAction.Request request = new CreateDataStreamAction.Request(name); AcknowledgedResponse response = client().admin().indices().createDataStream(request).get(); assertThat(response.isAcknowledged(), is(true)); + performRemoteStoreTestAction(); return response; } @@ -67,6 +68,7 @@ public RolloverResponse rolloverDataStream(String name) throws Exception { RolloverResponse response = client().admin().indices().rolloverIndex(request).get(); assertThat(response.isAcknowledged(), is(true)); assertThat(response.isRolledOver(), is(true)); + performRemoteStoreTestAction(); return response; } @@ -109,5 +111,4 @@ public AcknowledgedResponse deleteIndexTemplate(String name) throws Exception { assertThat(response.isAcknowledged(), is(true)); return response; } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java index 737c0acc309fd..cd6cb0ca3b172 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java @@ -194,7 +194,7 @@ private static void indexDocs(BulkProcessor processor, int numDocs) { /** * Internal helper class to correlate backoff states with bulk responses. This is needed to check whether we maxed out the number * of retries but still got rejected (which is perfectly fine and can also happen from time to time under heavy load). - * + *
<p>
     * This implementation relies on an implementation detail in Retry, namely that the bulk listener is notified on the same thread
     * as the last call to the backoff policy's iterator. The advantage is that this is non-invasive to the rest of the production code.
     */
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java b/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java
index c62c61d5919d6..aefabcb9bc14f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java
@@ -69,7 +69,7 @@
 /**
  * The purpose of this test is to verify that when a processor executes an operation asynchronously that
  * the expected result is the same as if the same operation happens synchronously.
- *
+ * <p>
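The asynchronous behaviour this test compares against its synchronous twin hangs off the two execute variants on the ingest Processor interface. A rough sketch of an async-only processor under that assumption; the class, field name, and ad-hoc threading are illustrative, not the test's actual processors:

import java.util.function.BiConsumer;

import org.opensearch.ingest.AbstractProcessor;
import org.opensearch.ingest.IngestDocument;

final class AsyncFieldProcessorSketch extends AbstractProcessor {
    AsyncFieldProcessorSketch(String tag, String description) {
        super(tag, description);
    }

    @Override
    public void execute(IngestDocument document, BiConsumer<IngestDocument, Exception> handler) {
        // Complete the operation on another thread and report back through the handler;
        // a synchronous processor would instead return from execute(IngestDocument).
        new Thread(() -> {
            document.setFieldValue("processed", true);
            handler.accept(document, null);
        }).start();
    }

    @Override
    public IngestDocument execute(IngestDocument document) {
        throw new UnsupportedOperationException("async-only processor");
    }

    @Override
    public String getType() {
        return "async_field_sketch";
    }
}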
 * In this test two test processors are defined that basically do the same operation, but a single processor
 * executes asynchronously. The result of the operation should be the same and also the order in which the
 * bulk responses are returned should be the same as how the corresponding index requests were defined.
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java
index 2dffc393ef749..b1934f901ac65 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java
@@ -109,8 +109,8 @@ public List<AggregationSpec> getAggregations() {
         @Override
         public List<FetchSubPhase> getFetchSubPhases(FetchPhaseConstructionContext context) {
-            /**
-             * Set up a fetch sub phase that throws an exception on indices whose name starts with "boom".
+            /*
+              Set up a fetch sub phase that throws an exception on indices whose name starts with "boom".
              */
             return Collections.singletonList(fetchContext -> new FetchSubPhaseProcessor() {
                 @Override
@@ -594,6 +594,11 @@ protected Aggregator createInternal(
         ) throws IOException {
             return new TestAggregator(name, parent, searchContext);
         }
+
+        @Override
+        protected boolean supportsConcurrentSegmentSearch() {
+            return true;
+        }
     };
 }
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java
index 9101d0b575ab6..7bd1467933e00 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/GetTermVectorsIT.java
@@ -74,6 +74,10 @@
 public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
+    public GetTermVectorsIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Collections.singleton(MockKeywordPlugin.class);
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java
index 91d280a9c4771..7c6c47c682281 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/termvectors/MultiTermVectorsIT.java
@@ -52,6 +52,10 @@ import static org.hamcrest.Matchers.nullValue;
 public class MultiTermVectorsIT extends AbstractTermVectorsTestCase {
+    public MultiTermVectorsIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
     public void testDuelESLucene() throws Exception {
         AbstractTermVectorsTestCase.TestFieldSetting[] testFieldSettings = getFieldSettings();
         createIndexBasedOnFieldSettings("test", "alias", testFieldSettings);
diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java
index 4c8bf24b1655a..84648eda3d38c 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java
@@ -317,8 +317,8 @@ public void testThreeNodesNoClusterManagerBlock() throws Exception {
         );
         Settings
nonClusterManagerDataPathSettings1 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(0)); Settings nonClusterManagerDataPathSettings2 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(1)); - internalCluster().stopRandomNonClusterManagerNode(); - internalCluster().stopRandomNonClusterManagerNode(); + internalCluster().stopRandomNodeNotCurrentClusterManager(); + internalCluster().stopRandomNodeNotCurrentClusterManager(); logger.info("--> verify that there is no cluster-manager anymore on remaining node"); // spin here to wait till the state is set diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 4784441058e76..b30eb1f3e3b39 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -44,6 +44,7 @@ import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.GatewayMetaState; import org.opensearch.gateway.PersistedClusterStateService; +import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.indices.IndicesService; import org.opensearch.node.Node.DiscoverySettings; import org.opensearch.test.InternalTestCluster; @@ -180,6 +181,16 @@ public void testBootstrapNotClusterManagerEligible() { expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapClusterManagerCommand.NOT_CLUSTER_MANAGER_NODE_MSG); } + public void testBootstrapRemoteClusterEnabled() { + final Environment environment = TestEnvironment.newEnvironment( + Settings.builder() + .put(internalCluster().getDefaultSettings()) + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build() + ); + expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapClusterManagerCommand.REMOTE_CLUSTER_STATE_ENABLED_NODE); + } + public void testBootstrapNoDataFolder() { final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); expectThrows(() -> unsafeBootstrap(environment), OpenSearchNodeCommand.NO_NODE_FOLDER_FOUND_MSG); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java new file mode 100644 index 0000000000000..54824b67b7abc --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalIT.java @@ -0,0 +1,338 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
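The new testBootstrapRemoteClusterEnabled above asserts that unsafe bootstrap is refused on a node with remote cluster state enabled. A condensed sketch of the guard it implies; the helper and its placement are assumptions, while the setting and the message constant are the ones referenced in the diff:

import org.opensearch.OpenSearchException;
import org.opensearch.common.settings.Settings;
import org.opensearch.gateway.remote.RemoteClusterStateService;

final class BootstrapGuardSketch {
    // 'message' stands in for UnsafeBootstrapClusterManagerCommand.REMOTE_CLUSTER_STATE_ENABLED_NODE.
    static void ensureRemoteClusterStateDisabled(Settings settings, String message) {
        if (RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings)) {
            throw new OpenSearchException(message);
        }
    }
}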
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.action.admin.indices.get.GetIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexResponse; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.Index; +import org.opensearch.index.IndexService; +import org.opensearch.index.IndexSettings; +import org.opensearch.indices.IndicesService; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; + +import java.util.List; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class ClusterIndexRefreshIntervalIT extends OpenSearchIntegTestCase { + + public static final String INDEX_NAME = "test-index"; + + public static final String OTHER_INDEX_NAME = "other-test-index"; + + @Override + public Settings indexSettings() { + return Settings.builder().put(super.indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build(); + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + internalCluster().startClusterManagerOnlyNode(); + } + + public void testDefaultRefreshIntervalWithUpdateClusterAndIndexSettings() throws Exception { + String clusterManagerName = internalCluster().getClusterManagerName(); + List dataNodes = internalCluster().startDataOnlyNodes(2); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get(); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, randomFrom(dataNodes)); + String uuid = getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID); + IndexService indexService = indicesService.indexService(new Index(INDEX_NAME, uuid)); + assertEquals(getDefaultRefreshInterval(), indexService.getRefreshTaskInterval()); + + // Update the cluster.default.index.refresh_interval setting to another value and validate the index refresh interval + TimeValue refreshInterval = TimeValue.timeValueMillis(randomIntBetween(10, 90) * 1000L); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), refreshInterval)) + .get(); + assertEquals(refreshInterval, indexService.getRefreshTaskInterval()); + + // Update of cluster.minimum.index.refresh_interval setting to value less than refreshInterval above will fail + TimeValue invalidMinimumRefreshInterval = 
TimeValue.timeValueMillis(refreshInterval.millis() + randomIntBetween(1, 1000)); + IllegalArgumentException exceptionDuringMinUpdate = assertThrows( + IllegalArgumentException.class, + () -> client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), invalidMinimumRefreshInterval) + ) + .get() + ); + assertEquals( + "cluster minimum index refresh interval [" + + invalidMinimumRefreshInterval + + "] more than cluster default index refresh interval [" + + refreshInterval + + "]", + exceptionDuringMinUpdate.getMessage() + ); + + // Update the cluster.minimum.index.refresh_interval setting to a valid value, this will succeed. + TimeValue validMinimumRefreshInterval = TimeValue.timeValueMillis(refreshInterval.millis() - randomIntBetween(1, 1000)); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), validMinimumRefreshInterval) + ) + .get(); + + // Update with invalid index setting index.refresh_interval, this will fail. + TimeValue invalidRefreshInterval = TimeValue.timeValueMillis(validMinimumRefreshInterval.millis() - randomIntBetween(1, 1000)); + String expectedMessage = "invalid index.refresh_interval [" + + invalidRefreshInterval + + "]: cannot be smaller than cluster.minimum.index.refresh_interval [" + + validMinimumRefreshInterval + + "]"; + + IllegalArgumentException exceptionDuringUpdateSettings = assertThrows( + IllegalArgumentException.class, + () -> client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), invalidRefreshInterval) + ) + ) + .actionGet() + ); + assertEquals(expectedMessage, exceptionDuringUpdateSettings.getMessage()); + + // Create another index with invalid index setting index.refresh_interval, this fails. + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), invalidRefreshInterval) + .build(); + IllegalArgumentException exceptionDuringCreateIndex = assertThrows( + IllegalArgumentException.class, + () -> createIndex(OTHER_INDEX_NAME, indexSettings) + ); + assertEquals(expectedMessage, exceptionDuringCreateIndex.getMessage()); + + // Update with valid index setting index.refresh_interval, this will succeed now. + TimeValue validRefreshInterval = TimeValue.timeValueMillis(validMinimumRefreshInterval.millis() + randomIntBetween(1, 1000)); + client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), validRefreshInterval) + ) + ) + .get(); + // verify refresh task interval is updated. + assertEquals(validRefreshInterval, indexService.getRefreshTaskInterval()); + + // Try to create another index with valid index setting index.refresh_interval, this will pass. 
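The assertions above encode an ordering between the three refresh-interval settings: the cluster minimum may not exceed the cluster default, and an index interval may not fall below the cluster minimum. A condensed sketch of that rule; this standalone helper is illustrative, the real validation lives in the server's settings infrastructure:

import org.opensearch.common.unit.TimeValue;

final class RefreshIntervalRuleSketch {
    static void validate(TimeValue indexInterval, TimeValue clusterMinimum, TimeValue clusterDefault) {
        if (clusterMinimum.compareTo(clusterDefault) > 0) {
            // Mirrors the cluster-settings failure asserted above.
            throw new IllegalArgumentException(
                "cluster minimum index refresh interval ["
                    + clusterMinimum
                    + "] more than cluster default index refresh interval ["
                    + clusterDefault
                    + "]"
            );
        }
        if (indexInterval.compareTo(clusterMinimum) < 0) {
            // Mirrors the create/update failures exercised above and below.
            throw new IllegalArgumentException(
                "invalid index.refresh_interval ["
                    + indexInterval
                    + "]: cannot be smaller than cluster.minimum.index.refresh_interval ["
                    + clusterMinimum
                    + "]"
            );
        }
    }
}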
+        createIndex(
+            OTHER_INDEX_NAME,
+            Settings.builder().put(indexSettings).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), validRefreshInterval).build()
+        );
+        getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get();
+        String otherUuid = getIndexResponse.getSettings().get(OTHER_INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID);
+        assertEquals(validRefreshInterval, indicesService.indexService(new Index(OTHER_INDEX_NAME, otherUuid)).getRefreshTaskInterval());
+
+        // Update the cluster.default.index.refresh_interval & cluster.minimum.index.refresh_interval setting to null
+        client(clusterManagerName).admin()
+            .cluster()
+            .prepareUpdateSettings()
+            .setTransientSettings(
+                Settings.builder()
+                    .putNull(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey())
+                    .putNull(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey())
+            )
+            .get();
+        // verify the index is still using the refresh interval passed in the update settings call
+        assertEquals(validRefreshInterval, indexService.getRefreshTaskInterval());
+
+        // Remove the index setting as well now, it should reset the refresh task interval to the default refresh interval
+        client(clusterManagerName).admin()
+            .indices()
+            .updateSettings(
+                new UpdateSettingsRequest(INDEX_NAME).settings(
+                    Settings.builder().putNull(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey())
+                )
+            )
+            .get();
+        assertEquals(getDefaultRefreshInterval(), indexService.getRefreshTaskInterval());
+    }
+
+    public void testRefreshIntervalDisabled() throws ExecutionException, InterruptedException {
+        TimeValue clusterMinimumRefreshInterval = client().settings()
+            .getAsTime(IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.MINUS_ONE);
+        boolean createIndexSuccess = clusterMinimumRefreshInterval.equals(TimeValue.MINUS_ONE);
+        String clusterManagerName = internalCluster().getClusterManagerName();
+        List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
+        Settings settings = Settings.builder()
+            .put(indexSettings())
+            .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), IndexSettings.MINIMUM_REFRESH_INTERVAL)
+            .build();
+        if (createIndexSuccess) {
+            createIndex(INDEX_NAME, settings);
+            ensureYellowAndNoInitializingShards(INDEX_NAME);
+            ensureGreen(INDEX_NAME);
+            GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get();
+            IndicesService indicesService = internalCluster().getInstance(IndicesService.class, randomFrom(dataNodes));
+            String uuid = getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID);
+            IndexService indexService = indicesService.indexService(new Index(INDEX_NAME, uuid));
+            assertEquals(IndexSettings.MINIMUM_REFRESH_INTERVAL, indexService.getRefreshTaskInterval());
+        } else {
+            IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, settings));
+            assertEquals(
+                "invalid index.refresh_interval [-1]: cannot be smaller than cluster.minimum.index.refresh_interval ["
+                    + getMinRefreshIntervalForRefreshDisabled()
+                    + "]",
+                exception.getMessage()
+            );
+        }
+    }
+
+    protected TimeValue getMinRefreshIntervalForRefreshDisabled() {
+        throw new RuntimeException("Not expected to be called here; subclasses that disable refresh must override this.");
+    }
+
+    public void testInvalidRefreshInterval() {
+        String invalidRefreshInterval = "-10s";
+        internalCluster().startDataOnlyNodes(2);
+        Settings settings = Settings.builder()
+            .put(indexSettings())
+            .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), invalidRefreshInterval)
+            .build();
+        IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, settings));
+        assertEquals(
+            "failed to parse setting [index.refresh_interval] with value ["
+                + invalidRefreshInterval
+                + "] as a time value: negative durations are not supported",
+            exception.getMessage()
+        );
+    }
+
+    public void testCreateIndexWithExplicitNullRefreshInterval() throws ExecutionException, InterruptedException {
+        List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
+        Settings indexSettings = Settings.builder()
+            .put(indexSettings())
+            .putNull(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey())
+            .build();
+        createIndex(INDEX_NAME, indexSettings);
+        ensureYellowAndNoInitializingShards(INDEX_NAME);
+        ensureGreen(INDEX_NAME);
+
+        GetIndexResponse getIndexResponse = client(internalCluster().getClusterManagerName()).admin()
+            .indices()
+            .getIndex(new GetIndexRequest())
+            .get();
+        String uuid = getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID);
+
+        IndicesService indicesService = internalCluster().getInstance(IndicesService.class, randomFrom(dataNodes));
+        IndexService indexService = indicesService.indexService(new Index(INDEX_NAME, uuid));
+
+        assertEquals(IndexSettings.DEFAULT_REFRESH_INTERVAL, indexService.getRefreshTaskInterval());
+    }
+
+    /**
+     * In this test we check the case where an index is created with an `index.refresh_interval` that ends up lower than
+     * the `cluster.minimum.index.refresh_interval` because we later raise the cluster minimum above the index setting.
+     * The underlying index should continue to use the same refresh interval as before.
+     */
+    public void testClusterMinimumChangeOnIndexWithCustomRefreshInterval() throws ExecutionException, InterruptedException {
+        List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
+        TimeValue customRefreshInterval = TimeValue.timeValueSeconds(getDefaultRefreshInterval().getSeconds() + randomIntBetween(1, 5));
+        Settings indexSettings = Settings.builder()
+            .put(indexSettings())
+            .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), customRefreshInterval)
+            .build();
+        createIndex(INDEX_NAME, indexSettings);
+
+        ensureYellowAndNoInitializingShards(INDEX_NAME);
+        ensureGreen(INDEX_NAME);
+
+        GetIndexResponse getIndexResponse = client(internalCluster().getClusterManagerName()).admin()
+            .indices()
+            .getIndex(new GetIndexRequest())
+            .get();
+        String uuid = getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID);
+
+        IndicesService indicesService = internalCluster().getInstance(IndicesService.class, randomFrom(dataNodes));
+        IndexService indexService = indicesService.indexService(new Index(INDEX_NAME, uuid));
+
+        assertEquals(customRefreshInterval, indexService.getRefreshTaskInterval());
+
+        // Update the cluster.minimum.index.refresh_interval setting to a valid value higher than the custom refresh interval.
+        // At the same time, due to a certain degree of randomness in the test, we update cluster.default.index.refresh_interval
+        // to a valid value as well to keep the test behaviour deterministic.
+ TimeValue clusterMinimum = TimeValue.timeValueSeconds(customRefreshInterval.getSeconds() + randomIntBetween(1, 5)); + TimeValue clusterDefault = TimeValue.timeValueSeconds(customRefreshInterval.getSeconds() + 6); + String clusterManagerName = internalCluster().getClusterManagerName(); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), clusterDefault) + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), clusterMinimum) + ) + .get(); + + // Validate that the index refresh interval is still the existing one that was used during index creation + assertEquals(customRefreshInterval, indexService.getRefreshTaskInterval()); + + // Update index setting to a value >= current cluster minimum and this should happen successfully. + customRefreshInterval = TimeValue.timeValueSeconds(clusterMinimum.getSeconds() + randomIntBetween(1, 5)); + client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), customRefreshInterval) + ) + ) + .get(); + assertEquals(customRefreshInterval, indexService.getRefreshTaskInterval()); + } + + protected TimeValue getDefaultRefreshInterval() { + return IndexSettings.DEFAULT_REFRESH_INTERVAL; + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalWithNodeSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalWithNodeSettingsIT.java new file mode 100644 index 0000000000000..5fc7bfcbcd442 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/metadata/ClusterIndexRefreshIntervalWithNodeSettingsIT.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.indices.IndicesService; + +public class ClusterIndexRefreshIntervalWithNodeSettingsIT extends ClusterIndexRefreshIntervalIT { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), getDefaultRefreshInterval()) + .put( + IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), + getMinRefreshIntervalForRefreshDisabled().toString() + ) + .build(); + } + + @Override + protected TimeValue getMinRefreshIntervalForRefreshDisabled() { + return TimeValue.timeValueSeconds(1); + } + + @Override + protected TimeValue getDefaultRefreshInterval() { + return TimeValue.timeValueSeconds(5); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java index 38b86d307d197..737b272613a44 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java @@ -461,7 +461,7 @@ public boolean validateClusterForming() { /** * Tests that indices are properly deleted even if there is a cluster-manager transition in between. - * Test for https://github.com/elastic/elasticsearch/issues/11665 + * Test for Elasticsearch issue #11665 */ public void testIndicesDeleted() throws Exception { final String idxName = "test"; diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java index f0d52405efac6..9aee6f7f7a192 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -195,6 +196,8 @@ public void testIsolateClusterManagerAndVerifyClusterStateConsensus() throws Exc } } + ClusterStateStats clusterStateStats = internalCluster().clusterService().getClusterManagerService().getClusterStateStats(); + assertTrue(clusterStateStats.getUpdateFailed() > 0); }); } diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java index a2864b6dfd1da..70124c8c46700 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java @@ -136,7 +136,7 @@ public void testClusterJoinDespiteOfPublishingIssues() throws Exception { // shutting down the nodes, to avoid the leakage check tripping // on the states associated with the commit requests we may have dropped - internalCluster().stopRandomNonClusterManagerNode(); + 
internalCluster().stopRandomNodeNotCurrentClusterManager(); } public void testClusterFormingWithASlowNode() { diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/single/SingleNodeDiscoveryIT.java index 90bdcf7fded11..1f6c8eac6c391 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/single/SingleNodeDiscoveryIT.java @@ -76,6 +76,7 @@ public void testSingleNodesDoNotDiscoverEachOther() throws IOException, Interrup @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() + .put(featureFlagSettings()) .put("discovery.type", "single-node") .put("transport.type", getTestTransportType()) /* @@ -142,6 +143,7 @@ public boolean innerMatch(final LogEvent event) { @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() + .put(featureFlagSettings()) .put("discovery.type", "zen") .put("transport.type", getTestTransportType()) .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index 2bab61f3e1c4c..229cd7bffad2f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -53,7 +53,7 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.Engine; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.shard.ShardPath; @@ -519,7 +519,7 @@ public void testReuseInFileBasedPeerRecovery() throws Exception { .put("number_of_replicas", 1) // disable merges to keep segments the same - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // expire retention leases quickly .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java new file mode 100644 index 0000000000000..dfde1b958882c --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java @@ -0,0 +1,199 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.settings.Settings; +import org.opensearch.discovery.DiscoveryStats; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteClusterStateServiceIT extends RemoteStoreBaseIntegTestCase { + + private static String INDEX_NAME = "test-index"; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build(); + } + + private void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, String indices, int replicaCount, int shardCount) { + internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes); + internalCluster().startDataOnlyNodes(numDataOnlyNodes); + for (String index : indices.split(",")) { + createIndex(index, remoteStoreIndexSettings(replicaCount, shardCount)); + ensureYellowAndNoInitializingShards(index); + ensureGreen(index); + } + } + + private Map initialTestSetup(int shardCount, int replicaCount, int dataNodeCount, int clusterManagerNodeCount) { + prepareCluster(clusterManagerNodeCount, dataNodeCount, INDEX_NAME, replicaCount, shardCount); + Map indexStats = indexData(1, false, INDEX_NAME); + assertEquals(shardCount * (replicaCount + 1), getNumShards(INDEX_NAME).totalNumShards); + ensureGreen(INDEX_NAME); + return indexStats; + } + + public void testFullClusterRestoreStaleDelete() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); + setReplicaCount(0); + setReplicaCount(2); + setReplicaCount(0); + setReplicaCount(1); + setReplicaCount(0); + setReplicaCount(1); + setReplicaCount(0); + setReplicaCount(2); + setReplicaCount(0); + + RemoteClusterStateService remoteClusterStateService = internalCluster().getClusterManagerNodeInstance( + RemoteClusterStateService.class + ); + + RepositoriesService repositoriesService = internalCluster().getClusterManagerNodeInstance(RepositoriesService.class); + + BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(REPOSITORY_NAME); + BlobPath baseMetadataPath = repository.basePath() + .add( + Base64.getUrlEncoder() + .withoutPadding() + .encodeToString(getClusterState().getClusterName().value().getBytes(StandardCharsets.UTF_8)) + ) + .add("cluster-state") + .add(getClusterState().metadata().clusterUUID()); + + assertEquals(10, 
repository.blobStore().blobContainer(baseMetadataPath.add("manifest")).listBlobsByPrefix("manifest").size()); + + Map indexMetadataMap = remoteClusterStateService.getLatestClusterState( + cluster().getClusterName(), + getClusterState().metadata().clusterUUID() + ).getMetadata().getIndices(); + assertEquals(0, indexMetadataMap.values().stream().findFirst().get().getNumberOfReplicas()); + assertEquals(shardCount, indexMetadataMap.values().stream().findFirst().get().getNumberOfShards()); + } + + public void testRemoteStateStats() { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + prepareCluster(clusterManagerNodeCount, dataNodeCount, INDEX_NAME, replicaCount, shardCount); + String clusterManagerNode = internalCluster().getClusterManagerName(); + String dataNode = internalCluster().getDataNodeNames().stream().collect(Collectors.toList()).get(0); + + // Fetch _nodes/stats + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats(clusterManagerNode) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .get(); + + // assert cluster state stats + assertClusterManagerClusterStateStats(nodesStatsResponse); + + NodesStatsResponse nodesStatsResponseDataNode = client().admin() + .cluster() + .prepareNodesStats(dataNode) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .get(); + // assert cluster state stats for data node + DiscoveryStats dataNodeDiscoveryStats = nodesStatsResponseDataNode.getNodes().get(0).getDiscoveryStats(); + assertNotNull(dataNodeDiscoveryStats.getClusterStateStats()); + assertEquals(0, dataNodeDiscoveryStats.getClusterStateStats().getUpdateSuccess()); + + // call nodes/stats with nodeId filter + NodesStatsResponse nodesStatsNodeIdFilterResponse = client().admin() + .cluster() + .prepareNodesStats(dataNode) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .setNodesIds(clusterManagerNode) + .get(); + + assertClusterManagerClusterStateStats(nodesStatsNodeIdFilterResponse); + } + + private void assertClusterManagerClusterStateStats(NodesStatsResponse nodesStatsResponse) { + // assert cluster state stats + DiscoveryStats discoveryStats = nodesStatsResponse.getNodes().get(0).getDiscoveryStats(); + + assertNotNull(discoveryStats.getClusterStateStats()); + assertTrue(discoveryStats.getClusterStateStats().getUpdateSuccess() > 1); + assertEquals(0, discoveryStats.getClusterStateStats().getUpdateFailed()); + assertTrue(discoveryStats.getClusterStateStats().getUpdateTotalTimeInMillis() > 0); + // assert remote state stats + assertTrue(discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getSuccessCount() > 1); + assertEquals(0, discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getFailedCount()); + assertTrue(discoveryStats.getClusterStateStats().getPersistenceStats().get(0).getTotalTimeInMillis() > 0); + } + + public void testRemoteStateStatsFromAllNodes() { + int shardCount = randomIntBetween(1, 5); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 3; + prepareCluster(clusterManagerNodeCount, dataNodeCount, INDEX_NAME, replicaCount, shardCount); + String[] allNodes = internalCluster().getNodeNames(); + // call _nodes/stats/discovery from all the nodes + for (String node : allNodes) { + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats(node) + 
.addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .get(); + validateNodesStatsResponse(nodesStatsResponse); + } + + // call _nodes/stats/discovery from all the nodes with random nodeId filter + for (String node : allNodes) { + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats(node) + .addMetric(NodesStatsRequest.Metric.DISCOVERY.metricName()) + .setNodesIds(allNodes[randomIntBetween(0, allNodes.length - 1)]) + .get(); + validateNodesStatsResponse(nodesStatsResponse); + } + } + + private void validateNodesStatsResponse(NodesStatsResponse nodesStatsResponse) { + // _nodes/stats/discovery must never fail due to any exception + assertFalse(nodesStatsResponse.toString().contains("exception")); + assertNotNull(nodesStatsResponse.getNodes()); + assertNotNull(nodesStatsResponse.getNodes().get(0)); + assertNotNull(nodesStatsResponse.getNodes().get(0).getDiscoveryStats()); + } + + private void setReplicaCount(int replicaCount) { + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, replicaCount)) + .get(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/index/IndexSortIT.java b/server/src/internalClusterTest/java/org/opensearch/index/IndexSortIT.java index d547ded8152dd..bb6e356db188f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/IndexSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/IndexSortIT.java @@ -32,22 +32,45 @@ package org.opensearch.index; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.containsString; -public class IndexSortIT extends OpenSearchIntegTestCase { +public class IndexSortIT extends ParameterizedOpenSearchIntegTestCase { private static final XContentBuilder TEST_MAPPING = createTestMapping(); + public IndexSortIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + private static XContentBuilder createTestMapping() { try { return jsonBuilder().startObject() diff --git a/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java b/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java index 883e539b74b68..033ea75b68958 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java @@ -15,7 +15,6 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.rest.RestStatus; import org.opensearch.index.shard.IndexShard; @@ -31,7 +30,6 @@ import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Objects; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -39,7 +37,8 @@ import static java.util.Arrays.asList; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.index.SegmentReplicationPressureService.MAX_INDEXING_CHECKPOINTS; -import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_TIME_SETTING; +import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING; +import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_TIME_BACKPRESSURE_SETTING; import static org.opensearch.index.SegmentReplicationPressureService.SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -54,7 +53,7 @@ protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) - .put(MAX_REPLICATION_TIME_SETTING.getKey(), TimeValue.timeValueSeconds(1)) + .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueSeconds(1)) .put(MAX_INDEXING_CHECKPOINTS.getKey(), MAX_CHECKPOINTS_BEHIND) .build(); } @@ -225,7 +224,10 @@ public void testBelowReplicaLimit() throws Exception { public void testFailStaleReplica() throws Exception { - Settings settings = Settings.builder().put(MAX_REPLICATION_TIME_SETTING.getKey(), TimeValue.timeValueMillis(500)).build(); + Settings settings = Settings.builder() + .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .put(MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING.getKey(), TimeValue.timeValueMillis(1000)) + .build(); // Starts a primary and replica node. final String primaryNode = internalCluster().startNode(settings); createIndex(INDEX_NAME); @@ -260,11 +262,13 @@ public void testFailStaleReplica() throws Exception { } public void testWithDocumentReplicationEnabledIndex() throws Exception { - assumeTrue( - "Can't create DocRep index with remote store enabled. Skipping.", - Objects.equals(featureFlagSettings().get(FeatureFlags.REMOTE_STORE, "false"), "false") + assumeFalse( + "Skipping the test as its not compatible with segment replication with remote store. 
Cannot create DocRep indices with Remote store enabled", + segmentReplicationWithRemoteEnabled() ); - Settings settings = Settings.builder().put(MAX_REPLICATION_TIME_SETTING.getKey(), TimeValue.timeValueMillis(500)).build(); + Settings settings = Settings.builder() + .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueMillis(500)) + .build(); // Starts a primary and replica node. final String primaryNode = internalCluster().startNode(settings); createIndex( diff --git a/server/src/internalClusterTest/java/org/opensearch/index/codec/CodecCompressionLevelIT.java b/server/src/internalClusterTest/java/org/opensearch/index/codec/CodecCompressionLevelIT.java deleted file mode 100644 index 5f3e53f1454fc..0000000000000 --- a/server/src/internalClusterTest/java/org/opensearch/index/codec/CodecCompressionLevelIT.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec; - -import org.apache.logging.log4j.core.util.Throwables; -import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.settings.Settings; -import org.opensearch.test.OpenSearchIntegTestCase; - -import java.util.concurrent.ExecutionException; - -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; - -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) -public class CodecCompressionLevelIT extends OpenSearchIntegTestCase { - - public void testLuceneCodecsCreateIndexWithCompressionLevel() { - - internalCluster().ensureAtLeastNumDataNodes(1); - final String index = "test-index"; - - // creating index - assertThrows( - IllegalArgumentException.class, - () -> createIndex( - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) - .put("index.codec.compression_level", randomIntBetween(1, 6)) - .build() - ) - ); - - createIndex( - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) - .build() - ); - ensureGreen(index); - } - - public void testZStandardCodecsCreateIndexWithCompressionLevel() { - - internalCluster().ensureAtLeastNumDataNodes(1); - final String index = "test-index"; - - // creating index - createIndex( - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", randomFrom(CodecService.ZSTD_CODEC, CodecService.ZSTD_NO_DICT_CODEC)) - .put("index.codec.compression_level", randomIntBetween(1, 6)) - .build() - ); - - ensureGreen(index); - } - - public void testZStandardToLuceneCodecsWithCompressionLevel() throws ExecutionException, InterruptedException { - - internalCluster().ensureAtLeastNumDataNodes(1); - final String index = "test-index"; - - // creating index - createIndex( - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", randomFrom(CodecService.ZSTD_CODEC, 
CodecService.ZSTD_NO_DICT_CODEC)) - .put("index.codec.compression_level", randomIntBetween(1, 6)) - .build() - ); - ensureGreen(index); - - assertAcked(client().admin().indices().prepareClose(index)); - - Throwable executionException = expectThrows( - ExecutionException.class, - () -> client().admin() - .indices() - .updateSettings( - new UpdateSettingsRequest(index).settings( - Settings.builder().put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) - ) - ) - .get() - ); - - Throwable rootCause = Throwables.getRootCause(executionException); - assertEquals(IllegalArgumentException.class, rootCause.getClass()); - assertTrue(rootCause.getMessage().startsWith("Compression level cannot be set")); - - assertAcked( - client().admin() - .indices() - .updateSettings( - new UpdateSettingsRequest(index).settings( - Settings.builder() - .put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) - .put("index.codec.compression_level", (String) null) - ) - ) - .get() - ); - - assertAcked(client().admin().indices().prepareOpen(index)); - ensureGreen(index); - } - - public void testLuceneToZStandardCodecsWithCompressionLevel() throws ExecutionException, InterruptedException { - - internalCluster().ensureAtLeastNumDataNodes(1); - final String index = "test-index"; - - // creating index - createIndex( - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) - .build() - ); - ensureGreen(index); - - assertAcked(client().admin().indices().prepareClose(index)); - - Throwable executionException = expectThrows( - ExecutionException.class, - () -> client().admin() - .indices() - .updateSettings( - new UpdateSettingsRequest(index).settings( - Settings.builder() - .put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) - .put("index.codec.compression_level", randomIntBetween(1, 6)) - ) - ) - .get() - ); - - Throwable rootCause = Throwables.getRootCause(executionException); - assertEquals(IllegalArgumentException.class, rootCause.getClass()); - assertTrue(rootCause.getMessage().startsWith("Compression level cannot be set")); - - assertAcked( - client().admin() - .indices() - .updateSettings( - new UpdateSettingsRequest(index).settings( - Settings.builder() - .put("index.codec", randomFrom(CodecService.ZSTD_CODEC, CodecService.ZSTD_NO_DICT_CODEC)) - .put("index.codec.compression_level", randomIntBetween(1, 6)) - ) - ) - .get() - ); - - assertAcked(client().admin().indices().prepareOpen(index)); - ensureGreen(index); - } - -} diff --git a/server/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java b/server/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java deleted file mode 100644 index 23d5cc3b35486..0000000000000 --- a/server/src/internalClusterTest/java/org/opensearch/index/codec/MultiCodecMergeIT.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec; - -import org.opensearch.action.admin.indices.flush.FlushResponse; -import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; -import org.opensearch.action.admin.indices.refresh.RefreshResponse; -import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; -import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.engine.Segment; -import org.opensearch.test.OpenSearchIntegTestCase; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static java.util.stream.Collectors.toList; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_METADATA; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_READ; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_BLOCKS_WRITE; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.is; - -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) -public class MultiCodecMergeIT extends OpenSearchIntegTestCase { - - public void testForceMergeMultipleCodecs() throws ExecutionException, InterruptedException { - - Map codecMap = Map.of( - "best_compression", - "BEST_COMPRESSION", - "zlib", - "BEST_COMPRESSION", - "zstd_no_dict", - "ZSTD_NO_DICT", - "zstd", - "ZSTD", - "default", - "BEST_SPEED", - "lz4", - "BEST_SPEED" - ); - - for (Map.Entry codec : codecMap.entrySet()) { - forceMergeMultipleCodecs(codec.getKey(), codec.getValue(), codecMap); - } - - } - - private void forceMergeMultipleCodecs(String finalCodec, String finalCodecMode, Map codecMap) throws ExecutionException, - InterruptedException { - - internalCluster().ensureAtLeastNumDataNodes(1); - final String index = "test-index" + finalCodec; - - // creating index - createIndex( - index, - Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.codec", "default") - .put("index.merge.policy.max_merged_segment", "1b") - .build() - ); - ensureGreen(index); - // ingesting and asserting segment codec mode for all four codecs - for (Map.Entry codec : codecMap.entrySet()) { - useCodec(index, codec.getKey()); - ingestDocs(index); - } - - assertTrue( - getSegments(index).stream() - .flatMap(s -> s.getAttributes().values().stream()) - .collect(Collectors.toSet()) - .containsAll(codecMap.values()) - ); - - // force merge into final codec - useCodec(index, finalCodec); - flushAndRefreshIndex(index); - final ForceMergeResponse forceMergeResponse = client().admin().indices().prepareForceMerge(index).setMaxNumSegments(1).get(); - - assertThat(forceMergeResponse.getFailedShards(), is(0)); - assertThat(forceMergeResponse.getSuccessfulShards(), is(1)); - - flushAndRefreshIndex(index); - - List segments = getSegments(index).stream().filter(Segment::isSearch).collect(Collectors.toList()); - assertEquals(1, segments.size()); 
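The core pattern of this removed test was to force-merge the index down to a single segment and then inspect the surviving segment's attributes for the expected codec mode. A minimal sketch of the merge step; the helper is illustrative, the client calls are the ones the test used:

import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.opensearch.client.Client;

final class ForceMergeSketch {
    static ForceMergeResponse forceMergeToOneSegment(Client client, String index) {
        // Collapse all segments into one so a single codec attribute remains to assert on.
        return client.admin().indices().prepareForceMerge(index).setMaxNumSegments(1).get();
    }
}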
- assertTrue(segments.stream().findFirst().get().attributes.containsValue(finalCodecMode)); - } - - private void useCodec(String index, String codec) throws ExecutionException, InterruptedException { - assertAcked(client().admin().indices().prepareClose(index)); - - assertAcked( - client().admin() - .indices() - .updateSettings(new UpdateSettingsRequest(index).settings(Settings.builder().put("index.codec", codec))) - .get() - ); - - assertAcked(client().admin().indices().prepareOpen(index)); - } - - private void ingestDocs(String index) throws InterruptedException { - ingest(index); - flushAndRefreshIndex(index); - } - - private ArrayList getSegments(String index) { - - return new ArrayList<>( - client().admin() - .indices() - .segments(new IndicesSegmentsRequest(index)) - .actionGet() - .getIndices() - .get(index) - .getShards() - .get(0) - .getShards()[0].getSegments() - ); - } - - private void ingest(String index) throws InterruptedException { - - final int nbDocs = randomIntBetween(1, 5); - indexRandom( - randomBoolean(), - false, - randomBoolean(), - IntStream.range(0, nbDocs) - .mapToObj(i -> client().prepareIndex(index).setId(UUID.randomUUID().toString()).setSource("num", i)) - .collect(toList()) - ); - } - - private void flushAndRefreshIndex(String index) { - - // Request is not blocked - for (String blockSetting : Arrays.asList( - SETTING_BLOCKS_READ, - SETTING_BLOCKS_WRITE, - SETTING_READ_ONLY, - SETTING_BLOCKS_METADATA, - SETTING_READ_ONLY_ALLOW_DELETE - )) { - try { - enableIndexBlock(index, blockSetting); - FlushResponse flushResponse = client().admin().indices().prepareFlush(index).setForce(true).execute().actionGet(); - assertNoFailures(flushResponse); - RefreshResponse response = client().admin().indices().prepareRefresh(index).execute().actionGet(); - assertNoFailures(response); - } finally { - disableIndexBlock(index, blockSetting); - } - } - } - -} diff --git a/server/src/internalClusterTest/java/org/opensearch/index/codec/ZstdNotEnabledIT.java b/server/src/internalClusterTest/java/org/opensearch/index/codec/ZstdNotEnabledIT.java new file mode 100644 index 0000000000000..9b1fa77fc9a5a --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/index/codec/ZstdNotEnabledIT.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec; + +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) +public class ZstdNotEnabledIT extends OpenSearchIntegTestCase { + + public void testZStdCodecsWithoutPluginInstalled() { + + internalCluster().startNode(); + final String index = "test-index"; + + // creating index with zstd and zstd_no_dict should fail if custom-codecs plugin is not installed + for (String codec : List.of("zstd", "zstd_no_dict")) { + assertThrows( + IllegalArgumentException.class, + () -> createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", codec) + .build() + ) + ); + } + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/index/search/MatchPhraseQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/index/search/MatchPhraseQueryIT.java index 6d76ee48a5b95..2d28578dbebcc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/search/MatchPhraseQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/search/MatchPhraseQueryIT.java @@ -32,26 +32,50 @@ package org.opensearch.index.search; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.MatchPhraseQueryBuilder; import org.opensearch.index.search.MatchQuery.ZeroTermsQuery; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.junit.Before; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.concurrent.ExecutionException; import static org.opensearch.index.query.QueryBuilders.matchPhraseQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -public class MatchPhraseQueryIT extends OpenSearchIntegTestCase { +public class MatchPhraseQueryIT extends ParameterizedOpenSearchIntegTestCase { + private static final String INDEX = "test"; + public MatchPhraseQueryIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Before public void setUp() throws Exception { super.setUp(); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 
6caca5bc6cb2b..44a900491d949 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -627,7 +627,15 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul } } }; - final IndexShard newShard = newIndexShard(indexService, shard, wrapper, getInstanceFromNode(CircuitBreakerService.class), listener); + NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); + final IndexShard newShard = newIndexShard( + indexService, + shard, + wrapper, + getInstanceFromNode(CircuitBreakerService.class), + env.nodeId(), + listener + ); shardRef.set(newShard); recoverShard(newShard); @@ -651,6 +659,7 @@ public static final IndexShard newIndexShard( final IndexShard shard, CheckedFunction wrapper, final CircuitBreakerService cbs, + final String nodeId, final IndexingOperationListener... listeners ) throws IOException { ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry()); @@ -678,6 +687,9 @@ public static final IndexShard newIndexShard( (indexSettings, shardRouting) -> new InternalTranslogFactory(), SegmentReplicationCheckpointPublisher.EMPTY, null, + null, + () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + nodeId, null ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java index f8c2acbf99f70..b431079476624 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandIT.java @@ -73,7 +73,7 @@ import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.GatewayMetaState; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.MockEngineFactoryPlugin; import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.translog.TestTranslog; @@ -135,7 +135,7 @@ public void testCorruptIndex() throws Exception { Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "-1") .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "checksum") diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java index 7e1d0792e3ddb..8291fef5d177b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java @@ -72,7 +72,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; @@ -167,7 +167,7 @@ public void 
testCorruptFileAndRecover() throws ExecutionException, InterruptedEx Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // no checkindex - we corrupt shards on purpose .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no translog based flush - it might change the .liv / segments.N files @@ -286,7 +286,7 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on // purpose // no translog based flush - it might change the .liv / segments.N files @@ -552,7 +552,7 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) // no checkindex - we corrupt shards on purpose .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no translog based flush - it might change the .liv / segments.N files @@ -624,7 +624,7 @@ public void testReplicaCorruption() throws Exception { prepareCreate("test").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, cluster().numDataNodes() - 1) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on // purpose .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) // no diff --git a/server/src/internalClusterTest/java/org/opensearch/index/suggest/stats/SuggestStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/index/suggest/stats/SuggestStatsIT.java index 9940b1eb13a52..6332b1b97426f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/suggest/stats/SuggestStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/suggest/stats/SuggestStatsIT.java @@ -32,6 +32,8 @@ package org.opensearch.index.suggest.stats; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; @@ -42,17 +44,22 @@ import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.search.stats.SearchStats; import org.opensearch.search.suggest.SuggestBuilder; import org.opensearch.search.suggest.phrase.PhraseSuggestionBuilder; import org.opensearch.search.suggest.term.TermSuggestionBuilder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import 
java.util.Set; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.hamcrest.Matchers.equalTo; @@ -61,7 +68,25 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class SuggestStatsIT extends OpenSearchIntegTestCase { +public class SuggestStatsIT extends ParameterizedOpenSearchIntegTestCase { + + public SuggestStatsIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected int numberOfReplicas() { return 0; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 12fee85288bc2..848f6eddbb0df 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -32,6 +32,8 @@ package org.opensearch.indices; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.opensearch.action.search.SearchResponse; @@ -40,13 +42,14 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.cache.request.RequestCacheStats; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.time.ZoneId; @@ -54,8 +57,10 @@ import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import java.util.Arrays; +import java.util.Collection; import java.util.List; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.dateRange; import static org.opensearch.search.aggregations.AggregationBuilders.filter; @@ -64,7 +69,23 @@ import static org.hamcrest.Matchers.equalTo; import static 
org.hamcrest.Matchers.greaterThan; -public class IndicesRequestCacheIT extends OpenSearchIntegTestCase { +public class IndicesRequestCacheIT extends ParameterizedOpenSearchIntegTestCase { + public IndicesRequestCacheIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } // One of the primary purposes of the query cache is to cache aggs results public void testCacheAggs() throws Exception { @@ -518,7 +539,7 @@ public void testCanCache() throws Exception { assertCacheState(client, "index", 0, 4); } - public void testCacheWithFilteredAlias() { + public void testCacheWithFilteredAlias() throws InterruptedException { Client client = client(); Settings settings = Settings.builder() .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) @@ -541,6 +562,8 @@ public void testCacheWithFilteredAlias() { OpenSearchAssertions.assertAllSuccessful(forceMergeResponse); refresh(); + indexRandomForConcurrentSearch("index"); + assertCacheState(client, "index", 0, 0); SearchResponse r1 = client.prepareSearch("index") diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java index dfe6889df2319..76f391bdcbb76 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/memory/breaker/CircuitBreakerServiceIT.java @@ -77,8 +77,8 @@ import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.BREAKER; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; -import static org.opensearch.search.SearchBootstrapSettings.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY; import static org.opensearch.search.aggregations.AggregationBuilders.cardinality; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.OpenSearchIntegTestCase.Scope.TEST; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java index e9962706bcd39..c049c8ed2d4a6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -56,15 +56,12 @@ public class IndexPrimaryRelocationIT extends OpenSearchIntegTestCase { private static final int RELOCATION_COUNT = 15; - public void setup() {} - public Settings indexSettings() { return Settings.builder().put("index.number_of_shards", 
1).put("index.number_of_replicas", 0).build(); } public void testPrimaryRelocationWhileIndexing() throws Exception { internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(2, 3)); - setup(); client().admin().indices().prepareCreate("test").setSettings(indexSettings()).setMapping("field", "type=text").get(); ensureGreen("test"); AtomicInteger numAutoGenDocs = new AtomicInteger(); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java index bdefd7a5e199a..f485d4e402b41 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java @@ -91,7 +91,7 @@ public void testGlobalPrimaryAllocation() throws Exception { /** * This test verifies the happy path where primary shard allocation is balanced when multiple indices are created. - * + *
* <p>
      * This test in general passes without primary shard balance as well due to nature of allocation algorithm which * assigns all primary shards first followed by replica copies. */ diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java index 9539044bf75b0..1d93eecd6b245 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java @@ -18,7 +18,6 @@ import org.opensearch.common.Nullable; import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; @@ -198,14 +197,14 @@ protected IndexShard getIndexShard(String node, ShardId shardId, String indexNam protected IndexShard getIndexShard(String node, String indexName) { final Index index = resolveIndex(indexName); IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); - IndexService indexService = indicesService.indexServiceSafe(index); + IndexService indexService = indicesService.indexService(index); + assertNotNull(indexService); final Optional shardId = indexService.shardIds().stream().findFirst(); - return indexService.getShard(shardId.get()); + return shardId.map(indexService::getShard).orElse(null); } protected boolean segmentReplicationWithRemoteEnabled() { - return IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings()).booleanValue() - && "true".equalsIgnoreCase(featureFlagSettings().get(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL)); + return IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexSettings()).booleanValue(); } protected Releasable blockReplication(List nodes, CountDownLatch latch) { @@ -241,7 +240,7 @@ protected Releasable blockReplication(List nodes, CountDownLatch latch) protected void assertReplicaCheckpointUpdated(IndexShard primaryShard) throws Exception { assertBusy(() -> { - Set groupStats = primaryShard.getReplicationStats(); + Set groupStats = primaryShard.getReplicationStatsForTrackedReplicas(); assertEquals(primaryShard.indexSettings().getNumberOfReplicas(), groupStats.size()); for (SegmentReplicationShardStats shardStat : groupStats) { assertEquals(0, shardStat.getCheckpointsBehindCount()); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java index a82fd8d845709..186a5ce39f131 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java @@ -19,6 +19,7 @@ import org.opensearch.test.OpenSearchIntegTestCase; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.indices.IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_SETTING_REPLICATION_TYPE; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) 
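A minimal sketch of the null-safe shard lookup adopted in SegmentReplicationBaseIT above (illustration only; it restates the pattern from that hunk, where indexService(index) can return null and the first shard id can be absent):

    IndexService indexService = indicesService.indexService(index); // returns null instead of throwing, unlike indexServiceSafe
    assertNotNull(indexService);
    Optional<Integer> shardId = indexService.shardIds().stream().findFirst();
    IndexShard shard = shardId.map(indexService::getShard).orElse(null); // null while the shard is still unassigned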
@@ -123,4 +124,30 @@ public void testIndexReplicationSettingOverridesDocRepClusterSetting() throws Ex assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabled(), false); } + public void testIndexReplicationTypeWhenRestrictSettingTrue() { + testRestrictIndexReplicationTypeSetting(true, randomFrom(ReplicationType.values())); + } + + public void testIndexReplicationTypeWhenRestrictSettingFalse() { + testRestrictIndexReplicationTypeSetting(false, randomFrom(ReplicationType.values())); + } + + private void testRestrictIndexReplicationTypeSetting(boolean setRestrict, ReplicationType replicationType) { + String expectedExceptionMsg = + "Validation Failed: 1: index setting [index.replication.type] is not allowed to be set as [cluster.restrict.index.replication_type=true];"; + String clusterManagerName = internalCluster().startNode( + Settings.builder().put(CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey(), setRestrict).build() + ); + internalCluster().startDataOnlyNodes(1); + + // Test create index fails + Settings indexSettings = Settings.builder().put(indexSettings()).put(SETTING_REPLICATION_TYPE, replicationType).build(); + if (setRestrict) { + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, indexSettings)); + assertEquals(expectedExceptionMsg, exception.getMessage()); + } else { + createIndex(INDEX_NAME, indexSettings); + } + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationDisruptionIT.java new file mode 100644 index 0000000000000..66b26b5d25cfe --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationDisruptionIT.java @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.apache.lucene.tests.util.LuceneTestCase; +import org.opensearch.action.admin.indices.recovery.RecoveryResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.recovery.FileChunkRequest; +import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportService; +import org.junit.Before; + +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; + +/** + * These tests simulate corruption cases during replication. They are skipped on WindowsFS simulation where file renaming + * can fail with an access denied IOException because deletion is not permitted. 
+ */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +@LuceneTestCase.SuppressFileSystems("WindowsFS") +public class SegmentReplicationDisruptionIT extends SegmentReplicationBaseIT { + @Before + private void setup() { + internalCluster().startClusterManagerOnlyNode(); + } + + public void testSendCorruptBytesToReplica() throws Exception { + final String primaryNode = internalCluster().startDataOnlyNode(); + createIndex( + INDEX_NAME, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.refresh_interval", -1) + .build() + ); + ensureYellow(INDEX_NAME); + final String replicaNode = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + MockTransportService primaryTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + primaryNode + )); + CountDownLatch latch = new CountDownLatch(1); + AtomicBoolean failed = new AtomicBoolean(false); + primaryTransportService.addSendBehavior( + internalCluster().getInstance(TransportService.class, replicaNode), + (connection, requestId, action, request, options) -> { + if (action.equals(SegmentReplicationTargetService.Actions.FILE_CHUNK) && failed.getAndSet(true) == false) { + FileChunkRequest req = (FileChunkRequest) request; + TransportRequest corrupt = new FileChunkRequest( + req.recoveryId(), + ((FileChunkRequest) request).requestSeqNo(), + ((FileChunkRequest) request).shardId(), + ((FileChunkRequest) request).metadata(), + ((FileChunkRequest) request).position(), + new BytesArray("test"), + false, + 0, + 0L + ); + connection.sendRequest(requestId, action, corrupt, options); + latch.countDown(); + } else { + connection.sendRequest(requestId, action, request, options); + } + } + ); + for (int i = 0; i < 100; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + final long originalRecoveryTime = getRecoveryStopTime(replicaNode); + assertNotEquals(originalRecoveryTime, 0); + refresh(INDEX_NAME); + latch.await(); + assertTrue(failed.get()); + waitForNewPeerRecovery(replicaNode, originalRecoveryTime); + // reset checkIndex to ensure our original shard doesn't throw + resetCheckIndexStatus(); + waitForSearchableDocs(100, primaryNode, replicaNode); + } + + public void testWipeSegmentBetweenSyncs() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final String primaryNode = internalCluster().startDataOnlyNode(); + createIndex( + INDEX_NAME, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.refresh_interval", -1) + .build() + ); + ensureYellow(INDEX_NAME); + final String replicaNode = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + for (int i = 0; i < 10; i++) { + client().prepareIndex(INDEX_NAME) + .setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + refresh(INDEX_NAME); + ensureGreen(INDEX_NAME); + final long originalRecoveryTime = getRecoveryStopTime(replicaNode); + + final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); + waitForSearchableDocs(INDEX_NAME, 10, List.of(replicaNode)); + indexShard.store().directory().deleteFile("_0.si"); + + for (int i = 11; i < 21; i++) { + client().prepareIndex(INDEX_NAME) + 
.setId(String.valueOf(i)) + .setSource(jsonBuilder().startObject().field("field", i).endObject()) + .get(); + } + refresh(INDEX_NAME); + waitForNewPeerRecovery(replicaNode, originalRecoveryTime); + resetCheckIndexStatus(); + waitForSearchableDocs(20, primaryNode, replicaNode); + } + + private void waitForNewPeerRecovery(String replicaNode, long originalRecoveryTime) throws Exception { + assertBusy(() -> { + // assert we have a peer recovery after the original + final long time = getRecoveryStopTime(replicaNode); + assertNotEquals(time, 0); + assertNotEquals(originalRecoveryTime, time); + + }, 1, TimeUnit.MINUTES); + } + + private long getRecoveryStopTime(String nodeName) { + final RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries(INDEX_NAME).get(); + final List recoveryStates = recoveryResponse.shardRecoveryStates().get(INDEX_NAME); + for (RecoveryState recoveryState : recoveryStates) { + if (recoveryState.getTargetNode().getName().equals(nodeName)) { + return recoveryState.getTimer().stopTime(); + } + } + return 0L; + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 69cdd80bb5085..a2996d87a851b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -15,12 +15,14 @@ import org.apache.lucene.document.SortedDocValuesField; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; +import org.apache.lucene.index.Fields; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.StandardDirectoryReader; import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; +import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; @@ -38,6 +40,8 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; import org.opensearch.action.support.WriteRequest; +import org.opensearch.action.termvectors.TermVectorsRequestBuilder; +import org.opensearch.action.termvectors.TermVectorsResponse; import org.opensearch.action.update.UpdateResponse; import org.opensearch.client.Requests; import org.opensearch.cluster.ClusterState; @@ -57,7 +61,9 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexModule; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationPressureService; import org.opensearch.index.SegmentReplicationShardStats; @@ -81,6 +87,7 @@ import org.opensearch.transport.TransportService; import org.junit.Before; +import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -89,6 +96,7 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import 
java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import static java.util.Arrays.asList; @@ -143,8 +151,9 @@ public void testPrimaryStopped_ReplicaPromoted() throws Exception { final ShardRouting replicaShardRouting = getShardRoutingForNodeName(replica); assertNotNull(replicaShardRouting); assertTrue(replicaShardRouting + " should be promoted as a primary", replicaShardRouting.primary()); - refresh(INDEX_NAME); - assertHitCount(client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 2); + final SearchResponse response = client(replica).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(); + // new primary should have at least the doc count from the first set of segments. + assertTrue(response.getHits().getTotalHits().value >= 1); // assert we can index into the new primary. client().prepareIndex(INDEX_NAME).setId("3").setSource("bar", "baz").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); @@ -1621,4 +1630,207 @@ public void testRealtimeMultiGetRequestsUnsuccessful() { assertTrue(mgetResponse.getResponses()[1].isFailed()); } + + /** + * Tests that segment replication supports realtime term vector requests by reading and parsing the source from the translog to serve strong reads. + */ + public void testRealtimeTermVectorRequestsSuccessful() throws IOException { + final String primary = internalCluster().startDataOnlyNode(); + XContentBuilder mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("field") + .field("type", "text") + .field("term_vector", "with_positions_offsets_payloads") + .field("analyzer", "tv_test") + .endObject() + .endObject() + .endObject(); + // refresh interval disabled to ensure refresh rate of index (when data is ready for search) doesn't affect realtime termvectors + assertAcked( + prepareCreate(INDEX_NAME).setMapping(mapping) + .addAlias(new Alias("alias")) + .setSettings( + Settings.builder() + .put(indexSettings()) + .put("index.analysis.analyzer.tv_test.tokenizer", "standard") + .put("index.refresh_interval", -1) + .putList("index.analysis.analyzer.tv_test.filter", "lowercase") + ) + ); + final String replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + final String id = routingKeyForShard(INDEX_NAME, 0); + + TermVectorsResponse response = client(replica).prepareTermVectors(indexOrAlias(), "1").get(); + assertFalse(response.isExists()); + + // index doc 1 + client().prepareIndex(INDEX_NAME) + .setId(Integer.toString(1)) + .setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog").endObject()) + .execute() + .actionGet(); + + // non realtime termvectors 1 + response = client().prepareTermVectors(indexOrAlias(), Integer.toString(1)).setRealtime(false).get(); + assertFalse(response.isExists()); + + // realtime termvectors 1 + TermVectorsRequestBuilder resp = client().prepareTermVectors(indexOrAlias(), Integer.toString(1)) + .setPayloads(true) + .setOffsets(true) + .setPositions(true) + .setRealtime(true) + .setSelectedFields(); + response = resp.execute().actionGet(); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + assertThat("doc id: " + 1 + " doesn't exist but should", response.isExists(), equalTo(true)); + Fields fields = response.getFields(); + assertThat(fields.size(), equalTo(1)); + + // index doc 2 with routing + client().prepareIndex(INDEX_NAME) + .setId(Integer.toString(2)) + .setRouting(id) + .setSource(jsonBuilder().startObject().field("field", "the quick
brown fox jumps over the lazy dog").endObject()) + .execute() + .actionGet(); + + // realtime termvectors 2 with routing + resp = client().prepareTermVectors(indexOrAlias(), Integer.toString(2)) + .setPayloads(true) + .setOffsets(true) + .setPositions(true) + .setRouting(id) + .setSelectedFields(); + response = resp.execute().actionGet(); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + assertThat("doc id: " + 2 + " doesn't exist but should", response.isExists(), equalTo(true)); + fields = response.getFields(); + assertThat(fields.size(), equalTo(1)); + + } + + public void testRealtimeTermVectorRequestsUnSuccessful() throws IOException { + final String primary = internalCluster().startDataOnlyNode(); + XContentBuilder mapping = jsonBuilder().startObject() + .startObject("properties") + .startObject("field") + .field("type", "text") + .field("term_vector", "with_positions_offsets_payloads") + .field("analyzer", "tv_test") + .endObject() + .endObject() + .endObject(); + // refresh interval disabled to ensure refresh rate of index (when data is ready for search) doesn't affect realtime termvectors + assertAcked( + prepareCreate(INDEX_NAME).setMapping(mapping) + .addAlias(new Alias("alias")) + .setSettings( + Settings.builder() + .put(indexSettings()) + .put("index.analysis.analyzer.tv_test.tokenizer", "standard") + .put("index.refresh_interval", -1) + .putList("index.analysis.analyzer.tv_test.filter", "lowercase") + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + ) + ); + final String replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + final String id = routingKeyForShard(INDEX_NAME, 0); + final String routingOtherShard = routingKeyForShard(INDEX_NAME, 1); + + // index doc 1 + client().prepareIndex(INDEX_NAME) + .setId(Integer.toString(1)) + .setSource(jsonBuilder().startObject().field("field", "the quick brown fox jumps over the lazy dog").endObject()) + .setRouting(id) + .execute() + .actionGet(); + + // non realtime termvectors 1 + TermVectorsResponse response = client().prepareTermVectors(indexOrAlias(), Integer.toString(1)).setRealtime(false).get(); + assertFalse(response.isExists()); + + // realtime termvectors (preference = _replica) + TermVectorsRequestBuilder resp = client(replica).prepareTermVectors(indexOrAlias(), Integer.toString(1)) + .setPayloads(true) + .setOffsets(true) + .setPositions(true) + .setPreference(Preference.REPLICA.type()) + .setRealtime(true) + .setSelectedFields(); + response = resp.execute().actionGet(); + + assertFalse(response.isExists()); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + + // realtime termvectors (with routing set) + resp = client(replica).prepareTermVectors(indexOrAlias(), Integer.toString(1)) + .setPayloads(true) + .setOffsets(true) + .setPositions(true) + .setRouting(routingOtherShard) + .setSelectedFields(); + response = resp.execute().actionGet(); + + assertFalse(response.isExists()); + assertThat(response.getIndex(), equalTo(INDEX_NAME)); + + } + + public void testReplicaAlreadyAtCheckpoint() throws Exception { + final List<String> nodes = new ArrayList<>(); + final String primaryNode = internalCluster().startDataOnlyNode(); + nodes.add(primaryNode); + final Settings settings = Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(); + createIndex(INDEX_NAME, settings); + ensureGreen(INDEX_NAME); + // start a replica node, initially will be empty with no shard assignment.
+ final String replicaNode = internalCluster().startDataOnlyNode(); + nodes.add(replicaNode); + final String replicaNode2 = internalCluster().startDataOnlyNode(); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2)) + ); + ensureGreen(INDEX_NAME); + + // index a doc. + client().prepareIndex(INDEX_NAME).setId("1").setSource("foo", randomInt()).get(); + refresh(INDEX_NAME); + + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + IndexShard replica_1 = getIndexShard(replicaNode, INDEX_NAME); + IndexShard replica_2 = getIndexShard(replicaNode2, INDEX_NAME); + // wait until a replica is promoted & finishes engine flip, we don't care which one + AtomicReference<IndexShard> primary = new AtomicReference<>(); + assertBusy(() -> { + assertTrue("replica should be promoted as a primary", replica_1.routingEntry().primary() || replica_2.routingEntry().primary()); + primary.set(replica_1.routingEntry().primary() ? replica_1 : replica_2); + }); + + FlushRequest request = new FlushRequest(INDEX_NAME); + request.force(true); + primary.get().flush(request); + + assertBusy(() -> { + assertEquals( + replica_1.getLatestReplicationCheckpoint().getSegmentInfosVersion(), + replica_2.getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + }); + + assertBusy(() -> { + ClusterStatsResponse clusterStatsResponse = client().admin().cluster().prepareClusterStats().get(); + ReplicationStats replicationStats = clusterStatsResponse.getIndicesStats().getSegments().getReplicationStats(); + assertEquals(0L, replicationStats.maxBytesBehind); + assertEquals(0L, replicationStats.maxReplicationLag); + assertEquals(0L, replicationStats.totalBytesBehind); + }); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java index 97e2045285d2f..69395dd853142 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java @@ -26,6 +26,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportService; @@ -55,6 +56,7 @@ private void createIndex(int replicaCount) { * This test verifies the happy path when a primary shard is relocated to a newly added node (target) in the cluster.
Before * relocation and after relocation, documents are indexed and verified. */ + @TestLogging(reason = "Getting trace logs from replication,shard and allocation package", value = "org.opensearch.indices.replication:TRACE, org.opensearch.index.shard:TRACE, org.opensearch.cluster.routing.allocation:TRACE") public void testPrimaryRelocation() throws Exception { final String oldPrimary = internalCluster().startNode(); createIndex(1); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationResizeRequestIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationResizeRequestIT.java new file mode 100644 index 0000000000000..fb06a97bd51c2 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationResizeRequestIT.java @@ -0,0 +1,248 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.opensearch.action.admin.indices.shrink.ResizeType; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.Preference; +import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; + +/** + * This test class verifies Resize Requests (Shrink, Split, Clone) with segment replication as the replication strategy. + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2) +public class SegmentReplicationResizeRequestIT extends SegmentReplicationBaseIT { + + public void testCreateShrinkIndexThrowsExceptionWhenReplicasBehind() throws Exception { + + // create index with -1 as refresh interval as we are blocking segrep and we want to control refreshes. + prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put("index.refresh_interval", -1) + .put("index.number_of_replicas", 1) + .put("number_of_shards", 2) + ).get(); + + final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below.
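The comment block above alludes to the standard two-step resize preparation; as a compact illustration (a sketch assembled from the settings this test itself applies a few lines down, not an independent recipe), the index is pinned to a single node and write-blocked before the resize API is invoked:

    client().admin().indices().prepareUpdateSettings("test")
        .setSettings(Settings.builder()
            .put("index.routing.allocation.require._name", discoveryNodes[0].getName()) // co-locate all shard copies on one node
            .put("index.blocks.write", true)) // make the index read-only for the duration of the resize
        .get();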
+ ensureGreen(); + + // block Segment Replication so that replicas never get the docs from primary + CountDownLatch latch = new CountDownLatch(1); + try (final Releasable ignored = blockReplication(List.of(discoveryNodes[0].getName()), latch)) { + final int docs = 500; + for (int i = 0; i < docs; i++) { + client().prepareIndex("test").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + + // block writes on index before performing shrink operation + client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings( + Settings.builder() + .put("index.routing.allocation.require._name", discoveryNodes[0].getName()) + .put("index.blocks.write", true) + ) + .get(); + ensureGreen(); + + // Trigger Shrink operation, as replicas don't have any docs it will throw exception that replicas haven't caught up + IllegalStateException exception = assertThrows( + IllegalStateException.class, + () -> client().admin() + .indices() + .prepareResizeIndex("test", "target") + .setResizeType(ResizeType.SHRINK) + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 1) + .putNull("index.blocks.write") + .putNull("index.routing.allocation.require._name") + .build() + ) + .get() + ); + assertEquals( + " For index [test] replica shards haven't caught up with primary, please retry after sometime.", + exception.getMessage() + ); + + } + + } + + public void testCreateSplitIndexWithSegmentReplicationBlocked() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(3); + + // create index with -1 as refresh interval as we are blocking segrep and we want to control refreshes. + prepareCreate("test").setSettings( + Settings.builder() + .put(indexSettings()) + .put("index.refresh_interval", -1) + .put("index.number_of_replicas", 1) + .put("number_of_shards", 3) + ).get(); + + final Map dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. 
+ ensureGreen(); + + CountDownLatch latch = new CountDownLatch(1); + + // block Segment Replication so that replicas never get the docs from primary + try (final Releasable ignored = blockReplication(List.of(discoveryNodes[0].getName()), latch)) { + final int docs = 500; + for (int i = 0; i < docs; i++) { + client().prepareIndex("test").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + refresh(); + assertBusy(() -> { + assertHitCount( + client().prepareSearch("test") + .setQuery(new TermsQueryBuilder("foo", "bar")) + .setPreference(Preference.PRIMARY.type()) + .get(), + docs + ); + }); + + // block writes on index before performing split operation + client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") + ) + .get(); + + // Trigger split operation + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("test", "target") + .setResizeType(ResizeType.SPLIT) + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 1) + .put("index.number_of_shards", 6) + .putNull("index.blocks.write") + .build() + ) + .get() + ); + ensureGreen(); + + // verify that all docs are present in new target index + assertHitCount( + client().prepareSearch("target") + .setQuery(new TermsQueryBuilder("foo", "bar")) + .setPreference(Preference.PRIMARY.type()) + .get(), + docs + ); + } + + } + + public void testCloneIndex() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(3); + + // create index with -1 as refresh interval as we are blocking segrep and we want to control refreshes. + prepareCreate("test").setSettings( + Settings.builder().put(indexSettings()).put("index.number_of_replicas", 1).put("number_of_shards", randomIntBetween(1, 5)) + ).get(); + + final Map dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. 
+ ensureGreen(); + + CountDownLatch latch = new CountDownLatch(1); + + // block Segment Replication so that replicas never get the docs from primary + try (final Releasable ignored = blockReplication(List.of(discoveryNodes[0].getName()), latch)) { + final int docs = 500; + for (int i = 0; i < docs; i++) { + client().prepareIndex("test").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get(); + } + refresh(); + assertBusy(() -> { + assertHitCount( + client().prepareSearch("test") + .setQuery(new TermsQueryBuilder("foo", "bar")) + .setPreference(Preference.PRIMARY.type()) + .get(), + docs + ); + }); + + // block writes on index before performing clone operation + client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") + ) + .get(); + + // Trigger clone operation + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("test", "target") + .setResizeType(ResizeType.CLONE) + .setSettings(Settings.builder().put("index.number_of_replicas", 1).putNull("index.blocks.write").build()) + .get() + ); + ensureGreen(); + + // verify that all docs are present in new target index + assertHitCount( + client().prepareSearch("target") + .setQuery(new TermsQueryBuilder("foo", "bar")) + .setPreference(Preference.PRIMARY.type()) + .get(), + docs + ); + } + + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java index 159de1a681f53..766471fdc0756 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationStatsIT.java @@ -8,9 +8,15 @@ package org.opensearch.indices.replication; +import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationShardStats; import org.opensearch.index.shard.IndexShard; @@ -357,4 +363,71 @@ public void testQueryAgainstDocRepIndex() { .actionGet(); assertTrue(segmentReplicationStatsResponse.getReplicationStats().isEmpty()); } + + public void testSegmentReplicationNodeAndIndexStats() throws Exception { + logger.info("--> start primary node"); + final String primaryNode = internalCluster().startNode(); + + logger.info("--> create index on node: {}", primaryNode); + assertAcked(prepareCreate(INDEX_NAME, Settings.builder().put(indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2))); + + ensureYellow(); + logger.info("--> start first replica node"); + final String replicaNode1 = internalCluster().startNode(); + + logger.info("--> start second replica
node"); + final String replicaNode2 = internalCluster().startNode(); + + ensureGreen(); + CountDownLatch latch = new CountDownLatch(1); + // block replication + try (final Releasable ignored = blockReplication(List.of(replicaNode1, replicaNode2), latch)) { + // index another doc while blocked, this would not get replicated to the replicas. + Thread indexingThread = new Thread(() -> { + client().prepareIndex(INDEX_NAME).setId("2").setSource("foo2", randomInt()).get(); + refresh(INDEX_NAME); + }); + + indexingThread.start(); + indexingThread.join(); + latch.await(); + + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats() + .clear() + .setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Segments)) + .get(); + + for (NodeStats nodeStats : nodesStatsResponse.getNodes()) { + ReplicationStats replicationStats = nodeStats.getIndices().getSegments().getReplicationStats(); + // primary node - should hold replication statistics + if (nodeStats.getNode().getName().equals(primaryNode)) { + assertTrue(replicationStats.getMaxBytesBehind() > 0); + assertTrue(replicationStats.getTotalBytesBehind() > 0); + assertTrue(replicationStats.getMaxReplicationLag() > 0); + // 2 replicas so total bytes should be double of max + assertEquals(replicationStats.getMaxBytesBehind() * 2, replicationStats.getTotalBytesBehind()); + } + // replica nodes - should hold empty replication statistics + if (nodeStats.getNode().getName().equals(replicaNode1) || nodeStats.getNode().getName().equals(replicaNode2)) { + assertEquals(0, replicationStats.getMaxBytesBehind()); + assertEquals(0, replicationStats.getTotalBytesBehind()); + assertEquals(0, replicationStats.getMaxReplicationLag()); + } + } + // get replication statistics at index level + IndicesStatsResponse stats = client().admin().indices().prepareStats().execute().actionGet(); + + // stats should be of non-zero value when aggregated at index level + ReplicationStats indexReplicationStats = stats.getIndex(INDEX_NAME).getTotal().getSegments().getReplicationStats(); + assertNotNull(indexReplicationStats); + assertTrue(indexReplicationStats.getMaxBytesBehind() > 0); + assertTrue(indexReplicationStats.getTotalBytesBehind() > 0); + assertTrue(indexReplicationStats.getMaxReplicationLag() > 0); + assertEquals(2 * indexReplicationStats.getMaxBytesBehind(), indexReplicationStats.getTotalBytesBehind()); + } + + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/ArchivedIndexSettingsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/ArchivedIndexSettingsIT.java new file mode 100644 index 0000000000000..8dc343abf8da2 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/ArchivedIndexSettingsIT.java @@ -0,0 +1,147 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.settings; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; +import static org.hamcrest.Matchers.startsWith; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, supportsDedicatedMasters = false) +public class ArchivedIndexSettingsIT extends OpenSearchIntegTestCase { + private volatile boolean installPlugin; + + public void testArchiveSettings() throws Exception { + installPlugin = true; + // Set up the cluster with an index containing dummy setting(owned by dummy plugin) + String oldClusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + String oldDataNode = internalCluster().startDataOnlyNode(); + assertEquals(2, internalCluster().numDataAndClusterManagerNodes()); + createIndex("test"); + ensureYellow(); + // Add a dummy setting + client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put("index.dummy", "foobar").put("index.dummy2", "foobar")) + .execute() + .actionGet(); + + // Remove dummy plugin and replace the cluster manager node so that the stale plugin setting moves to "archived". + installPlugin = false; + String newClusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(oldClusterManagerNode)); + internalCluster().restartNode(newClusterManagerNode); + + // Verify that archived settings exists. + assertBusy(() -> { + // Verify that cluster state is in recovered state. + assertFalse(client().admin().cluster().prepareState().get().getState().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)); + assertTrue( + client().admin() + .indices() + .prepareGetSettings("test") + .get() + .getIndexToSettings() + .get("test") + .hasValue("archived.index.dummy") + ); + assertTrue( + client().admin() + .indices() + .prepareGetSettings("test") + .get() + .getIndexToSettings() + .get("test") + .hasValue("archived.index.dummy2") + ); + }, 30, TimeUnit.SECONDS); + + // Archived setting update should fail on open index. + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().putNull("archived.index.dummy")) + .execute() + .actionGet() + ); + assertThat( + exception.getMessage(), + startsWith("Can't update non dynamic settings [[archived.index.dummy]] for open indices [[test") + ); + + // close the index. + client().admin().indices().prepareClose("test").get(); + + // Remove archived.index.dummy explicitly. + assertTrue( + client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().putNull("archived.index.dummy")) + .execute() + .actionGet() + .isAcknowledged() + ); + + // Remove archived.index.dummy2 using wildcard. + assertTrue( + client().admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().putNull("archived.*")) + .execute() + .actionGet() + .isAcknowledged() + ); + + // Verify that archived settings are cleaned up successfully. 
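To summarize the lifecycle exercised above (a sketch under the assumptions of this test: a plugin-owned index setting whose plugin has since been removed), the stale key resurfaces under the archived. prefix and can only be removed, never updated, once the index is closed:

    client().admin().indices().prepareClose("test").get();
    // remove a single archived key, or all of them at once with the "archived.*" wildcard
    client().admin().indices().prepareUpdateSettings("test")
        .setSettings(Settings.builder().putNull("archived.*"))
        .get();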
+ assertFalse( + client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test").hasValue("archived.index.dummy") + ); + assertFalse( + client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test").hasValue("archived.index.dummy2") + ); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return installPlugin ? Arrays.asList(DummySettingPlugin.class) : Collections.emptyList(); + } + + public static class DummySettingPlugin extends Plugin { + public static final Setting<String> DUMMY_SETTING = Setting.simpleString( + "index.dummy", + Setting.Property.IndexScope, + Setting.Property.Dynamic + ); + public static final Setting<String> DUMMY_SETTING2 = Setting.simpleString( + "index.dummy2", + Setting.Property.IndexScope, + Setting.Property.Dynamic + ); + + @Override + public List<Setting<?>> getSettings() { + return Arrays.asList(DUMMY_SETTING, DUMMY_SETTING2); + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java index ae88dd76d54e0..547f9e7a8d380 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseIndexIT.java @@ -509,7 +509,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { } /** - * Test for https://github.com/elastic/elasticsearch/issues/47276 which checks that the persisted metadata on a data node does not + * Test for Elasticsearch issue #47276 which checks that the persisted metadata on a data node does not * become inconsistent when using replicated closed indices. */ public void testRelocatedClosedIndexIssue() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java index ef3c2c1235a3c..0967acb37d3e8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/stats/IndexStatsIT.java @@ -32,6 +32,8 @@ package org.opensearch.indices.stats; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -55,6 +57,7 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamOutput; @@ -63,24 +66,26 @@ import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; import org.opensearch.index.MergeSchedulerConfig; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.VersionType; import org.opensearch.index.cache.query.QueryCacheStats; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.remote.RemoteSegmentStats; import org.opensearch.index.shard.IndexShard; +import
org.opensearch.index.translog.RemoteTranslogStats; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -103,6 +108,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @@ -116,7 +122,23 @@ @ClusterScope(scope = Scope.SUITE, numDataNodes = 2, numClientNodes = 0) @SuppressCodecs("*") // requires custom completion format -public class IndexStatsIT extends OpenSearchIntegTestCase { +public class IndexStatsIT extends ParameterizedOpenSearchIntegTestCase { + public IndexStatsIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { @@ -567,8 +589,8 @@ public void testNonThrottleStats() throws Exception { prepareCreate("test").setSettings( settingsBuilder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10000") ) @@ -599,8 +621,8 @@ public void testThrottleStats() throws Exception { prepareCreate("test").setSettings( settingsBuilder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1")
.put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "1") .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC.name()) @@ -1435,9 +1457,12 @@ public void testZeroRemoteStoreStatsOnNonRemoteStoreIndex() { .get() .status() ); - ShardStats shard = client().admin().indices().prepareStats(indexName).setSegments(true).get().getShards()[0]; + ShardStats shard = client().admin().indices().prepareStats(indexName).setSegments(true).setTranslog(true).get().getShards()[0]; RemoteSegmentStats remoteSegmentStatsFromIndexStats = shard.getStats().getSegments().getRemoteSegmentStats(); assertZeroRemoteSegmentStats(remoteSegmentStatsFromIndexStats); + RemoteTranslogStats remoteTranslogStatsFromIndexStats = shard.getStats().getTranslog().getRemoteTranslogStats(); + assertZeroRemoteTranslogStats(remoteTranslogStatsFromIndexStats); + NodesStatsResponse nodesStatsResponse = client().admin().cluster().prepareNodesStats(primaryNodeName(indexName)).get(); RemoteSegmentStats remoteSegmentStatsFromNodesStats = nodesStatsResponse.getNodes() .get(0) @@ -1445,17 +1470,22 @@ .getSegments() .getRemoteSegmentStats(); assertZeroRemoteSegmentStats(remoteSegmentStatsFromNodesStats); + RemoteTranslogStats remoteTranslogStatsFromNodesStats = nodesStatsResponse.getNodes() + .get(0) + .getIndices() + .getTranslog() + .getRemoteTranslogStats(); + assertZeroRemoteTranslogStats(remoteTranslogStatsFromNodesStats); } private void assertZeroRemoteSegmentStats(RemoteSegmentStats remoteSegmentStats) { - assertEquals(0, remoteSegmentStats.getUploadBytesStarted()); - assertEquals(0, remoteSegmentStats.getUploadBytesSucceeded()); - assertEquals(0, remoteSegmentStats.getUploadBytesFailed()); - assertEquals(0, remoteSegmentStats.getDownloadBytesStarted()); - assertEquals(0, remoteSegmentStats.getDownloadBytesSucceeded()); - assertEquals(0, remoteSegmentStats.getDownloadBytesFailed()); - assertEquals(0, remoteSegmentStats.getMaxRefreshBytesLag()); - assertEquals(0, remoteSegmentStats.getMaxRefreshTimeLag()); + // Compare with a fresh object, whose values all default to 0 + assertEquals(new RemoteSegmentStats(), remoteSegmentStats); + } + + private void assertZeroRemoteTranslogStats(RemoteTranslogStats remoteTranslogStats) { + // Compare with a fresh object, whose values all default to 0 + assertEquals(new RemoteTranslogStats(), remoteTranslogStats); } /** @@ -1474,4 +1504,37 @@ private void persistGlobalCheckpoint(String index) throws Exception { } } } + + public void testSegmentReplicationStats() { + String indexName = "test-index"; + createIndex( + indexName, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 1).build() + ); + + ensureGreen(indexName); + + IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats(); + IndicesStatsResponse stats = builder.execute().actionGet(); + + // document replication enabled index should return empty segment replication stats + assertNotNull(stats.getIndex(indexName).getTotal().getSegments().getReplicationStats()); + + indexName = "test-index2"; + createIndex( + indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build() + ); + ensureGreen(indexName); + + builder = client().admin().indices().prepareStats(); + stats =
builder.execute().actionGet(); + + // segment replication enabled index should return segment replication stats + assertNotNull(stats.getIndex(indexName).getTotal().getSegments().getReplicationStats()); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java b/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java index 9e13f06983860..f77ae80a55276 100644 --- a/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/mget/SimpleMgetIT.java @@ -32,6 +32,8 @@ package org.opensearch.mget; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.get.MultiGetItemResponse; @@ -40,16 +42,20 @@ import org.opensearch.action.get.MultiGetResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.search.fetch.subphase.FetchSourceContext; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import java.util.Map; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -57,7 +63,24 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -public class SimpleMgetIT extends OpenSearchIntegTestCase { +public class SimpleMgetIT extends ParameterizedOpenSearchIntegTestCase { + + public SimpleMgetIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } public void testThatMgetShouldWorkWithOneIndexMissing() throws IOException { createIndex("test"); diff --git a/server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java new file mode 100644 index 0000000000000..f270cb1399072 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/nodestats/NodeStatsIT.java @@ -0,0 +1,276 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.nodestats; + +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.bulk.BulkItemResponse; +import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.delete.DeleteRequest; +import org.opensearch.action.delete.DeleteResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.update.UpdateRequest; +import org.opensearch.action.update.UpdateResponse; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.index.engine.DocumentMissingException; +import org.opensearch.index.engine.VersionConflictEngineException; +import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.hamcrest.MatcherAssert; + +import java.util.Arrays; +import java.util.Comparator; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +@ClusterScope(scope = Scope.TEST, numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) +public class NodeStatsIT extends OpenSearchIntegTestCase { + + private final DocStatusStats expectedDocStatusStats = new DocStatusStats(); + private static final String FIELD = "dummy_field"; + private static final String VALUE = "dummy_value"; + private static final Map<String, String> SOURCE = singletonMap(FIELD, VALUE); + + public void testNodeIndicesStatsDocStatusStatsIndexBulk() { + { // Testing Index + final String INDEX = "test_index"; + final String ID = "id"; + { // Testing Normal Index + IndexResponse response = client().index(new IndexRequest(INDEX).id(ID).source(SOURCE)).actionGet(); + updateExpectedDocStatusCounter(response); + + MatcherAssert.assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertDocStatusStats(); + } + { // Testing Missing Alias + updateExpectedDocStatusCounter( + expectThrows( + IndexNotFoundException.class, + () -> client().index(new IndexRequest(INDEX).id("missing_alias").setRequireAlias(true).source(SOURCE)).actionGet() + ) + ); + assertDocStatusStats(); + } + { + // Test Missing Pipeline: Ingestion failure, not Indexing failure + expectThrows( + IllegalArgumentException.class, + () -> client().index(new IndexRequest(INDEX).id("missing_pipeline").setPipeline("missing").source(SOURCE)).actionGet() + ); + assertDocStatusStats(); + } + { // Testing Version Conflict + final String docId = "version_conflict"; + + updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(docId).source(SOURCE)).actionGet()); + updateExpectedDocStatusCounter( + expectThrows( + VersionConflictEngineException.class, + () -> client().index(new IndexRequest(INDEX).id(docId).source(SOURCE).setIfSeqNo(1L).setIfPrimaryTerm(99L)) + .actionGet() + ) + ); + assertDocStatusStats(); + } + } + { // Testing Bulk + final String INDEX = "bulk_index"; + + int sizeOfIndexRequests = scaledRandomIntBetween(10, 20); + int sizeOfDeleteRequests = scaledRandomIntBetween(5, sizeOfIndexRequests); + int sizeOfNotFoundRequests = scaledRandomIntBetween(5, sizeOfIndexRequests); + + BulkRequest bulkRequest = new BulkRequest(); + + for (int i = 0; i <
sizeOfIndexRequests; ++i) { + bulkRequest.add(new IndexRequest(INDEX).id(String.valueOf(i)).source(SOURCE)); + } + + BulkResponse response = client().bulk(bulkRequest).actionGet(); + + MatcherAssert.assertThat(response.hasFailures(), equalTo(false)); + MatcherAssert.assertThat(response.getItems().length, equalTo(sizeOfIndexRequests)); + + for (BulkItemResponse itemResponse : response.getItems()) { + updateExpectedDocStatusCounter(itemResponse.getResponse()); + } + + refresh(INDEX); + bulkRequest.requests().clear(); + + for (int i = 0; i < sizeOfDeleteRequests; ++i) { + bulkRequest.add(new DeleteRequest(INDEX, String.valueOf(i))); + } + for (int i = 0; i < sizeOfNotFoundRequests; ++i) { + bulkRequest.add(new DeleteRequest(INDEX, String.valueOf(25 + i))); + } + + response = client().bulk(bulkRequest).actionGet(); + + MatcherAssert.assertThat(response.hasFailures(), equalTo(false)); + MatcherAssert.assertThat(response.getItems().length, equalTo(sizeOfDeleteRequests + sizeOfNotFoundRequests)); + + for (BulkItemResponse itemResponse : response.getItems()) { + updateExpectedDocStatusCounter(itemResponse.getResponse()); + } + + refresh(INDEX); + assertDocStatusStats(); + } + } + + public void testNodeIndicesStatsDocStatusStatsCreateDeleteUpdate() { + { // Testing Create + final String INDEX = "create_index"; + final String ID = "id"; + { // Testing Creation + IndexResponse response = client().index(new IndexRequest(INDEX).id(ID).source(SOURCE).create(true)).actionGet(); + updateExpectedDocStatusCounter(response); + + MatcherAssert.assertThat(response.getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertDocStatusStats(); + } + { // Testing Version Conflict + final String docId = "version_conflict"; + + updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(docId).source(SOURCE)).actionGet()); + updateExpectedDocStatusCounter( + expectThrows( + VersionConflictEngineException.class, + () -> client().index(new IndexRequest(INDEX).id(docId).source(SOURCE).create(true)).actionGet() + ) + ); + assertDocStatusStats(); + } + } + { // Testing Delete + final String INDEX = "delete_index"; + final String ID = "id"; + { // Testing Deletion + IndexResponse response = client().index(new IndexRequest(INDEX).id(ID).source(SOURCE)).actionGet(); + updateExpectedDocStatusCounter(response); + + DeleteResponse deleteResponse = client().delete(new DeleteRequest(INDEX, ID)).actionGet(); + updateExpectedDocStatusCounter(deleteResponse); + + MatcherAssert.assertThat(response.getSeqNo(), greaterThanOrEqualTo(0L)); + MatcherAssert.assertThat(deleteResponse.getResult(), equalTo(DocWriteResponse.Result.DELETED)); + assertDocStatusStats(); + } + { // Testing Non-Existing Doc + updateExpectedDocStatusCounter(client().delete(new DeleteRequest(INDEX, "does_not_exist")).actionGet()); + assertDocStatusStats(); + } + { // Testing Version Conflict + final String docId = "version_conflict"; + + updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(docId).source(SOURCE)).actionGet()); + updateExpectedDocStatusCounter( + expectThrows( + VersionConflictEngineException.class, + () -> client().delete(new DeleteRequest(INDEX, docId).setIfSeqNo(2L).setIfPrimaryTerm(99L)).actionGet() + ) + ); + + assertDocStatusStats(); + } + } + { // Testing Update + final String INDEX = "update_index"; + final String ID = "id"; + { // Testing Not Found + updateExpectedDocStatusCounter( + expectThrows( + DocumentMissingException.class, + () -> client().update(new UpdateRequest(INDEX, 
ID).doc(SOURCE)).actionGet() + ) + ); + assertDocStatusStats(); + } + { // Testing NoOp Update + updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(ID).source(SOURCE)).actionGet()); + + UpdateResponse response = client().update(new UpdateRequest(INDEX, ID).doc(SOURCE)).actionGet(); + updateExpectedDocStatusCounter(response); + + MatcherAssert.assertThat(response.getResult(), equalTo(DocWriteResponse.Result.NOOP)); + assertDocStatusStats(); + } + { // Testing Update + final String UPDATED_VALUE = "updated_value"; + UpdateResponse response = client().update(new UpdateRequest(INDEX, ID).doc(singletonMap(FIELD, UPDATED_VALUE))).actionGet(); + updateExpectedDocStatusCounter(response); + + MatcherAssert.assertThat(response.getResult(), equalTo(DocWriteResponse.Result.UPDATED)); + assertDocStatusStats(); + } + { // Testing Missing Alias + updateExpectedDocStatusCounter( + expectThrows( + IndexNotFoundException.class, + () -> client().update(new UpdateRequest(INDEX, ID).setRequireAlias(true).doc(new IndexRequest().source(SOURCE))) + .actionGet() + ) + ); + assertDocStatusStats(); + } + { // Testing Version Conflict + final String docId = "version_conflict"; + + updateExpectedDocStatusCounter(client().index(new IndexRequest(INDEX).id(docId).source(SOURCE)).actionGet()); + updateExpectedDocStatusCounter( + expectThrows( + VersionConflictEngineException.class, + () -> client().update(new UpdateRequest(INDEX, docId).doc(SOURCE).setIfSeqNo(2L).setIfPrimaryTerm(99L)).actionGet() + ) + ); + assertDocStatusStats(); + } + } + } + + private void assertDocStatusStats() { + DocStatusStats docStatusStats = client().admin() + .cluster() + .prepareNodesStats() + .execute() + .actionGet() + .getNodes() + .get(0) + .getIndices() + .getIndexing() + .getTotal() + .getDocStatusStats(); + + assertTrue( + Arrays.equals( + docStatusStats.getDocStatusCounter(), + expectedDocStatusStats.getDocStatusCounter(), + Comparator.comparingLong(AtomicLong::longValue) + ) + ); + } + + private void updateExpectedDocStatusCounter(DocWriteResponse r) { + expectedDocStatusStats.inc(r.status()); + } + + private void updateExpectedDocStatusCounter(Exception e) { + expectedDocStatusStats.inc(ExceptionsHelper.status(e)); + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java index f636185fd4649..d28df90216beb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java @@ -198,11 +198,11 @@ public void testNoRebalanceOnRollingRestart() throws Exception { // see https://github.com/elastic/elasticsearch/issues/14387 internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNodes(3); - /** - * We start 3 nodes and a dedicated cluster-manager. Restart on of the data-nodes and ensure that we got no relocations. - * Yet we have 6 shards 0 replica so that means if the restarting node comes back both other nodes are subject - * to relocating to the restarting node since all had 2 shards and now one node has nothing allocated. - * We have a fix for this to wait until we have allocated unallocated shards now so this shouldn't happen. + /* + We start 3 nodes and a dedicated cluster-manager. Restart one of the data-nodes and ensure that we got no relocations.
+ Yet we have 6 shards 0 replica so that means if the restarting node comes back both other nodes are subject + to relocating to the restarting node since all had 2 shards and now one node has nothing allocated. + We have a fix for this to wait until we have allocated unallocated shards now so this shouldn't happen. */ prepareCreate("test").setSettings( Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java index 6d2d8df106513..8166c0008ed83 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java @@ -12,25 +12,27 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.util.FileSystemUtils; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; -import org.opensearch.test.FeatureFlagSetter; -import org.junit.Before; import java.io.IOException; import java.nio.file.Path; import java.util.Arrays; import java.util.Collections; +import java.util.Locale; import java.util.Set; import java.util.stream.Collectors; -import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; public abstract class AbstractRemoteStoreMockRepositoryIntegTestCase extends AbstractSnapshotIntegTestCase { @@ -38,18 +40,6 @@ public abstract class AbstractRemoteStoreMockRepositoryIntegTestCase extends Abs protected static final String TRANSLOG_REPOSITORY_NAME = "my-translog-repo-1"; protected static final String INDEX_NAME = "remote-store-test-idx-1"; - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); - } - - @Before - public void setup() { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - FeatureFlagSetter.set(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL); - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REPOSITORY_NAME, TRANSLOG_REPOSITORY_NAME)); - } - @Override public Settings indexSettings() { return remoteStoreIndexSettings(0); @@ -66,34 +56,83 @@ protected Settings 
remoteStoreIndexSettings(int numberOfReplicas) { .build(); } - protected void deleteRepo() { - logger.info("--> Deleting the repository={}", REPOSITORY_NAME); - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); - logger.info("--> Deleting the repository={}", TRANSLOG_REPOSITORY_NAME); - assertAcked(clusterAdmin().prepareDeleteRepository(TRANSLOG_REPOSITORY_NAME)); + public Settings buildRemoteStoreNodeAttributes(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure) { + String segmentRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + REPOSITORY_NAME + ); + String translogRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + TRANSLOG_REPOSITORY_NAME + ); + String segmentRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + REPOSITORY_NAME + ); + String translogRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + TRANSLOG_REPOSITORY_NAME + ); + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + REPOSITORY_NAME + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + REPOSITORY_NAME + ); + + return Settings.builder() + .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, REPOSITORY_NAME) + .put(segmentRepoTypeAttributeKey, "mock") + .put(segmentRepoSettingsAttributeKeyPrefix + "location", repoLocation) + .put(segmentRepoSettingsAttributeKeyPrefix + "random_control_io_exception_rate", ioFailureRate) + .put(segmentRepoSettingsAttributeKeyPrefix + "skip_exception_on_verification_file", true) + .put(segmentRepoSettingsAttributeKeyPrefix + "skip_exception_on_list_blobs", true) + .put(segmentRepoSettingsAttributeKeyPrefix + "skip_exception_on_blobs", skipExceptionBlobList) + .put(segmentRepoSettingsAttributeKeyPrefix + "max_failure_number", maxFailure) + .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, TRANSLOG_REPOSITORY_NAME) + .put(translogRepoTypeAttributeKey, "mock") + .put(translogRepoSettingsAttributeKeyPrefix + "location", repoLocation) + .put("node.attr." 
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, REPOSITORY_NAME) + .put(stateRepoTypeAttributeKey, "mock") + .put(stateRepoSettingsAttributeKeyPrefix + "location", repoLocation) + .build(); + } + + protected void cleanupRepo() { + logger.info("--> Cleanup the repository={}", REPOSITORY_NAME); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).execute().actionGet(); + logger.info("--> Cleanup the repository={}", TRANSLOG_REPOSITORY_NAME); + clusterAdmin().prepareCleanupRepository(TRANSLOG_REPOSITORY_NAME).execute().actionGet(); } protected String setup(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure) { - logger.info("--> Creating repository={} at the path={}", REPOSITORY_NAME, repoLocation); + return setup(repoLocation, ioFailureRate, skipExceptionBlobList, maxFailure, 0); + } + + protected String setup(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure, int replicaCount) { // The random_control_io_exception_rate setting ensures that 10-25% of all operations to remote store results in // IOException. skip_exception_on_verification_file & skip_exception_on_list_blobs settings ensures that the // repository creation can happen without failure. - createRepository( - REPOSITORY_NAME, - "mock", - Settings.builder() - .put("location", repoLocation) - .put("random_control_io_exception_rate", ioFailureRate) - .put("skip_exception_on_verification_file", true) - .put("skip_exception_on_list_blobs", true) - // Skipping is required for metadata as it is part of recovery - .put("skip_exception_on_blobs", skipExceptionBlobList) - .put("max_failure_number", maxFailure) - ); - logger.info("--> Creating repository={} at the path={}", TRANSLOG_REPOSITORY_NAME, repoLocation); - createRepository(TRANSLOG_REPOSITORY_NAME, "mock", Settings.builder().put("location", repoLocation)); + Settings.Builder settings = Settings.builder() + .put(buildRemoteStoreNodeAttributes(repoLocation, ioFailureRate, skipExceptionBlobList, maxFailure)); + + if (randomBoolean()) { + settings.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT); + } + + disableRepoConsistencyCheck("Remote Store Creates System Repository"); - String dataNodeName = internalCluster().startDataOnlyNodes(1).get(0); + internalCluster().startClusterManagerOnlyNode(settings.build()); + String dataNodeName = internalCluster().startDataOnlyNode(settings.build()); + internalCluster().startDataOnlyNodes(replicaCount, settings.build()); createIndex(INDEX_NAME); logger.info("--> Created index={}", INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); @@ -126,7 +165,7 @@ private String getLocalSegmentFilename(String remoteFilename) { return remoteFilename.split(RemoteSegmentStoreDirectory.SEGMENT_NAME_UUID_SEPARATOR)[0]; } - private IndexResponse indexSingleDoc() { + protected IndexResponse indexSingleDoc() { return client().prepareIndex(INDEX_NAME) .setId(UUIDs.randomBase64UUID()) .setSource(randomAlphaOfLength(5), randomAlphaOfLength(5)) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java new file mode 100644 index 0000000000000..99c5d7fb2bae7 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/BaseRemoteStoreRestoreIT.java @@ -0,0 +1,116 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be
licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.transport.MockTransportService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; + +public class BaseRemoteStoreRestoreIT extends RemoteStoreBaseIntegTestCase { + static final String INDEX_NAME = "remote-store-test-idx-1"; + static final String INDEX_NAMES = "test-remote-store-1,test-remote-store-2,remote-store-test-index-1,remote-store-test-index-2"; + static final String INDEX_NAMES_WILDCARD = "test-remote-store-*,remote-store-test-index-*"; + static final String TOTAL_OPERATIONS = "total-operations"; + static final String REFRESHED_OR_FLUSHED_OPERATIONS = "refreshed-or-flushed-operations"; + static final String MAX_SEQ_NO_TOTAL = "max-seq-no-total"; + + @Override + public Settings indexSettings() { + return remoteStoreIndexSettings(0); + } + + public Settings indexSettings(int shards, int replicas) { + return remoteStoreIndexSettings(replicas, shards); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + + protected void restore(String... indices) { + restore(randomBoolean(), indices); + } + + protected void restore(boolean restoreAllShards, String...
indices) { + if (restoreAllShards) { + assertAcked(client().admin().indices().prepareClose(indices)); + } + client().admin() + .cluster() + .restoreRemoteStore( + new RestoreRemoteStoreRequest().indices(indices).restoreAllShards(restoreAllShards), + PlainActionFuture.newFuture() + ); + } + + protected void verifyRestoredData(Map<String, Long> indexStats, String indexName, boolean indexMoreData) throws Exception { + ensureYellowAndNoInitializingShards(indexName); + ensureGreen(indexName); + // This is to ensure that shards that were already assigned will get latest count + refresh(indexName); + assertBusy( + () -> assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(TOTAL_OPERATIONS)), + 30, + TimeUnit.SECONDS + ); + if (indexMoreData == false) return; + + IndexResponse response = indexSingleDoc(indexName); + if (indexStats.containsKey(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id())) { + assertEquals(indexStats.get(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id()) + 1, response.getSeqNo()); + } + refresh(indexName); + assertBusy( + () -> assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(TOTAL_OPERATIONS) + 1), + 30, + TimeUnit.SECONDS + ); + } + + protected void verifyRestoredData(Map<String, Long> indexStats, String indexName) throws Exception { + verifyRestoredData(indexStats, indexName, true); + } + + public void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, String indices, int replicaCount, int shardCount) { + prepareCluster(numClusterManagerNodes, numDataOnlyNodes, indices, replicaCount, shardCount, Settings.EMPTY); + } + + public void prepareCluster( + int numClusterManagerNodes, + int numDataOnlyNodes, + String indices, + int replicaCount, + int shardCount, + Settings settings + ) { + prepareCluster(numClusterManagerNodes, numDataOnlyNodes, settings); + for (String index : indices.split(",")) { + createIndex(index, remoteStoreIndexSettings(replicaCount, shardCount)); + ensureYellowAndNoInitializingShards(index); + ensureGreen(index); + } + } + + public void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, Settings settings) { + internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes, settings); + internalCluster().startDataOnlyNodes(numDataOnlyNodes, settings); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRepIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRepIT.java index 37dab5faaeb57..e1ab101fddf55 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRepIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRepIT.java @@ -75,8 +75,8 @@ public void testDefaultRemoteStoreNoUserOverrideExceptReplicationTypeSegment() t verifyRemoteStoreIndexSettings( indexSettings, "true", - "my-segment-repo-1", - "my-translog-repo-1", + REPOSITORY_NAME, + REPOSITORY_2_NAME, ReplicationType.SEGMENT.toString(), IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java index 9991126bb790c..d427a4db84ba2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java @@ -13,12 +13,9 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.After; import org.junit.Before; import java.util.Locale; @@ -28,53 +25,15 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; -import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) -public class CreateRemoteIndexIT extends OpenSearchIntegTestCase { - - @After - public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository("my-segment-repo-1")); - assertAcked(clusterAdmin().prepareDeleteRepository("my-translog-repo-1")); - assertAcked(clusterAdmin().prepareDeleteRepository("my-custom-repo")); - } - - @Override - protected Settings nodeSettings(int nodeOriginal) { - Settings settings = super.nodeSettings(nodeOriginal); - Settings.Builder builder = Settings.builder() - .put(remoteStoreClusterSettings("my-segment-repo-1", "my-translog-repo-1")) - .put(settings); - return builder.build(); - } - - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); - } +public class CreateRemoteIndexIT extends RemoteStoreBaseIntegTestCase { @Before - public void setup() { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(); - assertAcked( - clusterAdmin().preparePutRepository("my-segment-repo-1") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath().toAbsolutePath())) - ); - assertAcked( - clusterAdmin().preparePutRepository("my-translog-repo-1") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath().toAbsolutePath())) - ); - assertAcked( - clusterAdmin().preparePutRepository("my-custom-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath().toAbsolutePath())) - ); + public void setup() throws Exception { + internalCluster().startNodes(2); } public void testDefaultRemoteStoreNoUserOverride() throws Exception { @@ -91,8 +50,8 @@ public void testDefaultRemoteStoreNoUserOverride() throws Exception { verifyRemoteStoreIndexSettings( indexSettings, "true", - "my-segment-repo-1", - "my-translog-repo-1", + REPOSITORY_NAME, + REPOSITORY_2_NAME, ReplicationType.SEGMENT.toString(), IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java index f72d107a367de..e14a4062f7775 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java @@ -27,7 +27,9 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.transport.MockTransportService; +import org.junit.Before; + +import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; import java.util.HashSet; @@ -35,22 +37,28 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) - public class PrimaryTermValidationIT extends RemoteStoreBaseIntegTestCase { private static final String INDEX_NAME = "remote-store-test-idx-1"; + protected Path absolutePath; + protected Path absolutePath2; @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(MockTransportService.TestPlugin.class); } + @Before + public void setup() { + absolutePath = randomRepoPath().toAbsolutePath(); + absolutePath2 = randomRepoPath().toAbsolutePath(); + } + public void testPrimaryTermValidation() throws Exception { // Follower checker interval is lower compared to leader checker so that the cluster manager can remove the node // with network partition faster. The follower check retry count is also kept 1. @@ -61,20 +69,12 @@ public void testPrimaryTermValidation() throws Exception { .put(FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "1s") .put(FollowersChecker.FOLLOWER_CHECK_INTERVAL_SETTING.getKey(), "1s") .put(FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) - .put(remoteStoreClusterSettings(REPOSITORY_NAME, REPOSITORY_2_NAME, true)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath, REPOSITORY_2_NAME, absolutePath2)) .build(); internalCluster().startClusterManagerOnlyNode(clusterSettings); - - // Create repository - absolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) - ); - absolutePath2 = randomRepoPath().toAbsolutePath(); - putRepository(absolutePath2, REPOSITORY_2_NAME); - - // Start data nodes and create index internalCluster().startDataOnlyNodes(2, clusterSettings); + + // Create index createIndex(INDEX_NAME, remoteStoreIndexSettings(1)); ensureYellowAndNoInitializingShards(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -156,6 +156,7 @@ public void testPrimaryTermValidation() throws Exception { // received the following exception.
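+ // The shard copy on the isolated node has been failed after primary term validation, so indexing on it can no longer find the shard.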
ShardNotFoundException exception = assertThrows(ShardNotFoundException.class, () -> indexSameDoc(primaryNode, INDEX_NAME)); assertTrue(exception.getMessage().contains("no such shard")); + internalCluster().clearDisruptionScheme(); ensureStableCluster(3); ensureGreen(INDEX_NAME); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java index e4dcd637ac448..d8b7718a55377 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java @@ -10,7 +10,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.indices.recovery.IndexPrimaryRelocationIT; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; @@ -18,26 +17,21 @@ import java.nio.file.Path; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteIndexPrimaryRelocationIT extends IndexPrimaryRelocationIT { protected static final String REPOSITORY_NAME = "test-remote-store-repo"; protected Path absolutePath; - public void setup() { - absolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) - ); - } - protected Settings nodeSettings(int nodeOrdinal) { + if (absolutePath == null) { + absolutePath = randomRepoPath().toAbsolutePath(); + } return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(remoteStoreClusterSettings(REPOSITORY_NAME, REPOSITORY_NAME, false)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) .build(); } @@ -55,17 +49,8 @@ public Settings indexSettings() { .build(); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .build(); - } - - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9191") public void testPrimaryRelocationWhileIndexing() throws Exception { + internalCluster().startClusterManagerOnlyNode(); super.testPrimaryRelocationWhileIndexing(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java index 35962ba43a1df..c957f1b338bfe 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java @@ -10,7 +10,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import 
org.opensearch.indices.recovery.IndexRecoveryIT; @@ -24,40 +23,27 @@ import java.nio.file.Path; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteIndexRecoveryIT extends IndexRecoveryIT { protected static final String REPOSITORY_NAME = "test-remote-store-repo"; - protected Path absolutePath; + protected Path repositoryPath; - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(remoteStoreClusterSettings(REPOSITORY_NAME)).build(); + @Before + public void setup() { + repositoryPath = randomRepoPath().toAbsolutePath(); } @Override - protected Settings featureFlagSettings() { + protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, repositoryPath)) .build(); } - @Before - @Override - public void setUp() throws Exception { - super.setUp(); - internalCluster().startClusterManagerOnlyNode(); - absolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) - ); - } - @Override public Settings indexSettings() { return Settings.builder() @@ -70,7 +56,7 @@ public Settings indexSettings() { @After public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); } @Override @@ -171,4 +157,10 @@ public void testDisconnectsDuringRecovery() { public void testReplicaRecovery() { } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9580") + public void testRerouteRecovery() { + + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 70e571604ca53..149f7abd6f286 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -12,6 +12,7 @@ import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.delete.DeleteResponse; @@ -21,20 +22,32 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.index.Index; import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import 
org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; +import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; import org.junit.Before; import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; +import java.util.Optional; import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; @@ -43,6 +56,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteRestoreSnapshotIT extends AbstractSnapshotIntegTestCase { private static final String BASE_REMOTE_REPO = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; private Path remoteRepoPath; @@ -50,20 +64,18 @@ public class RemoteRestoreSnapshotIT extends AbstractSnapshotIntegTestCase { @Before public void setup() { remoteRepoPath = randomRepoPath().toAbsolutePath(); - createRepository(BASE_REMOTE_REPO, "fs", remoteRepoPath); } @After public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(BASE_REMOTE_REPO)); + clusterAdmin().prepareCleanupRepository(BASE_REMOTE_REPO).get(); } @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(remoteStoreClusterSettings(BASE_REMOTE_REPO)) + .put(remoteStoreClusterSettings(BASE_REMOTE_REPO, remoteRepoPath)) .build(); } @@ -339,6 +351,8 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); assertEquals(restoreSnapshotResponse2.status(), RestStatus.ACCEPTED); ensureGreen(indexName1, restoredIndexName2); + + assertRemoteSegmentsAndTranslogUploaded(restoredIndexName2); assertDocsPresentInIndex(client, indexName1, numDocsInIndex1); assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); @@ -360,6 +374,97 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { assertDocsPresentInIndex(client, indexName1, numDocsInIndex1 + 2); } + void assertRemoteSegmentsAndTranslogUploaded(String idx) throws IOException { + String indexUUID = client().admin().indices().prepareGetSettings(idx).get().getSetting(idx, IndexMetadata.SETTING_INDEX_UUID); + + Path remoteTranslogMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/metadata"); + Path remoteTranslogDataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/data"); + Path segmentMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/segments/metadata"); + Path segmentDataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/segments/data"); + + try ( + Stream<Path> translogMetadata = Files.list(remoteTranslogMetadataPath); + Stream<Path> translogData = Files.list(remoteTranslogDataPath); + Stream<Path> segmentMetadata = 
Files.list(segmentMetadataPath); + Stream<Path> segmentData = Files.list(segmentDataPath); + + ) { + assertTrue(translogData.count() > 0); + assertTrue(translogMetadata.count() > 0); + assertTrue(segmentMetadata.count() > 0); + assertTrue(segmentData.count() > 0); + } + + } + + public void testRemoteRestoreIndexRestoredFromSnapshot() throws IOException, ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + + String indexName1 = "testindex1"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); + + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName1, indexSettings); + + final int numDocsInIndex1 = randomIntBetween(20, 30); + indexDocuments(client(), indexName1, numDocsInIndex1); + flushAndRefresh(indexName1); + ensureGreen(indexName1); + + logger.info("--> snapshot"); + SnapshotInfo snapshotInfo1 = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(Arrays.asList(indexName1))); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); + + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(indexName1)).get()); + assertFalse(indexExists(indexName1)); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndices(indexName1) + .get(); + + assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); + ensureGreen(indexName1); + assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); + + assertRemoteSegmentsAndTranslogUploaded(indexName1); + + // Clear the local data before stopping the node. This will make sure that remote translog is empty.
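+ // With the local translog wiped and the node stopped, the remote store restore below is the only way to recover these operations.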
+ IndexShard indexShard = getIndexShard(primaryNodeName(indexName1), indexName1); + try (Stream<Path> files = Files.list(indexShard.shardPath().resolveTranslog())) { + IOUtils.deleteFilesIgnoringExceptions(files.collect(Collectors.toList())); + } + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(indexName1))); + + ensureRed(indexName1); + + client().admin() + .cluster() + .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(indexName1).restoreAllShards(false), PlainActionFuture.newFuture()); + + ensureGreen(indexName1); + assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); + } + + protected IndexShard getIndexShard(String node, String indexName) { + final Index index = resolveIndex(indexName); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); + IndexService indexService = indicesService.indexService(index); + assertNotNull(indexService); + final Optional<Integer> shardId = indexService.shardIds().stream().findFirst(); + return shardId.map(indexService::getShard).orElse(null); + } + public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException { String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); String primary = internalCluster().startDataOnlyNode(); @@ -441,7 +546,7 @@ public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); } - public void testRestoreShallowSnapshotRepositoryOverriden() throws ExecutionException, InterruptedException { + public void testRestoreShallowSnapshotRepository() throws ExecutionException, InterruptedException { String indexName1 = "testindex1"; String snapshotRepoName = "test-restore-snapshot-repo"; String remoteStoreRepoNameUpdated = "test-rs-repo-updated" + TEST_REMOTE_STORE_REPO_SUFFIX; @@ -491,22 +596,74 @@ public void testRestoreShallowSnapshotRepositoryOverriden() throws ExecutionExce ); assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); - createRepository(BASE_REMOTE_REPO, "fs", absolutePath2); - - RestoreSnapshotResponse restoreSnapshotResponse = client.admin() + client().admin().indices().close(Requests.closeIndexRequest(indexName1)).get(); + createRepository(remoteStoreRepoNameUpdated, "fs", remoteRepoPath); + RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() .cluster() .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) .setWaitForCompletion(true) .setIndices(indexName1) .setRenamePattern(indexName1) .setRenameReplacement(restoredIndexName1) + .setSourceRemoteStoreRepository(remoteStoreRepoNameUpdated) .get(); - assertTrue(restoreSnapshotResponse.getRestoreInfo().failedShards() > 0); + assertTrue(restoreSnapshotResponse2.getRestoreInfo().failedShards() == 0); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); - ensureRed(restoredIndexName1); + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); + } + + public void testRestoreShallowSnapshotIndexAfterSnapshot() throws ExecutionException, InterruptedException { + String indexName1 = "testindex1"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String remoteStoreRepoNameUpdated = "test-rs-repo-updated" + TEST_REMOTE_STORE_REPO_SUFFIX; + String snapshotName1 =
"test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + Path absolutePath2 = randomRepoPath().toAbsolutePath(); + String[] pathTokens = absolutePath1.toString().split("/"); + String basePath = pathTokens[pathTokens.length - 1]; + Arrays.copyOf(pathTokens, pathTokens.length - 1); + Path location = PathUtils.get(String.join("/", pathTokens)); + pathTokens = absolutePath2.toString().split("/"); + String basePath2 = pathTokens[pathTokens.length - 1]; + Arrays.copyOf(pathTokens, pathTokens.length - 1); + Path location2 = PathUtils.get(String.join("/", pathTokens)); + logger.info("Path 1 [{}]", absolutePath1); + logger.info("Path 2 [{}]", absolutePath2); + String restoredIndexName1 = indexName1 + "-restored"; + + createRepository(snapshotRepoName, "fs", getRepositorySettings(location, basePath, true)); + + Client client = client(); + Settings indexSettings = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + createIndex(indexName1, indexSettings); + + int numDocsInIndex1 = randomIntBetween(2, 5); + indexDocuments(client, indexName1, numDocsInIndex1); + + ensureGreen(indexName1); + + logger.info("--> snapshot"); + SnapshotInfo snapshotInfo1 = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(List.of(indexName1))); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); + + int extraNumDocsInIndex1 = randomIntBetween(20, 50); + indexDocuments(client, indexName1, extraNumDocsInIndex1); + refresh(indexName1); - client().admin().indices().close(Requests.closeIndexRequest(restoredIndexName1)).get(); + client().admin().indices().close(Requests.closeIndexRequest(indexName1)).get(); createRepository(remoteStoreRepoNameUpdated, "fs", remoteRepoPath); RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() .cluster() diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java deleted file mode 100644 index 71a174e300fe8..0000000000000 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java +++ /dev/null @@ -1,237 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.remotestore; - -import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; -import org.opensearch.action.admin.indices.stats.CommonStatsFlags; -import org.opensearch.common.settings.Settings; -import org.opensearch.index.remote.RemoteSegmentStats; -import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.Before; - -import java.util.concurrent.TimeUnit; - -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class RemoteSegmentStatsFromNodesStatsIT extends RemoteStoreBaseIntegTestCase { - private static final String INDEX_NAME = "remote-index-1"; - private static final int DATA_NODE_COUNT = 2; - private static final int CLUSTER_MANAGER_NODE_COUNT = 3; - - @Before - public void setup() { - setupCustomCluster(); - setupRepo(false); - } - - private void setupCustomCluster() { - internalCluster().startClusterManagerOnlyNodes(CLUSTER_MANAGER_NODE_COUNT); - internalCluster().startDataOnlyNodes(DATA_NODE_COUNT); - ensureStableCluster(DATA_NODE_COUNT + CLUSTER_MANAGER_NODE_COUNT); - } - - /** - * - Creates two indices with single primary shard, pinned to a single node. - * - Index documents in both of them and forces a fresh for both - * - Polls the _remotestore/stats API for individual index level stats - * - Adds up requisite fields from the API output, repeats this for the 2nd index - * - Polls _nodes/stats and verifies that the total values at node level adds up - * to the values capture in the previous step - */ - public void testNodesStatsParityWithOnlyPrimaryShards() { - String[] dataNodes = internalCluster().getDataNodeNames().toArray(String[]::new); - String randomDataNode = dataNodes[randomIntBetween(0, dataNodes.length - 1)]; - String firstIndex = INDEX_NAME + "1"; - String secondIndex = INDEX_NAME + "2"; - - // Create first index - createIndex( - firstIndex, - Settings.builder().put(remoteStoreIndexSettings(0, 1)).put("index.routing.allocation.require._name", randomDataNode).build() - ); - ensureGreen(firstIndex); - indexSingleDoc(firstIndex, true); - - // Create second index - createIndex( - secondIndex, - Settings.builder().put(remoteStoreIndexSettings(0, 1)).put("index.routing.allocation.require._name", randomDataNode).build() - ); - ensureGreen(secondIndex); - indexSingleDoc(secondIndex, true); - - long cumulativeUploadsSucceeded = 0, cumulativeUploadsStarted = 0, cumulativeUploadsFailed = 0; - long max_bytes_lag = 0, max_time_lag = 0; - // Fetch upload stats - RemoteStoreStatsResponse remoteStoreStatsFirstIndex = client(randomDataNode).admin() - .cluster() - .prepareRemoteStoreStats(firstIndex, "0") - .setLocal(true) - .get(); - cumulativeUploadsSucceeded += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesSucceeded; - cumulativeUploadsStarted += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesStarted; - cumulativeUploadsFailed += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesFailed; - max_bytes_lag = Math.max(max_bytes_lag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag); - max_time_lag = Math.max(max_time_lag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs); - - RemoteStoreStatsResponse remoteStoreStatsSecondIndex = client(randomDataNode).admin() - .cluster() - .prepareRemoteStoreStats(secondIndex, "0") - 
.setLocal(true) - .get(); - cumulativeUploadsSucceeded += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesSucceeded; - cumulativeUploadsStarted += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesStarted; - cumulativeUploadsFailed += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesFailed; - max_bytes_lag = Math.max(max_bytes_lag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag); - max_time_lag = Math.max(max_time_lag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs); - - // Fetch nodes stats - NodesStatsResponse nodesStatsResponse = client().admin() - .cluster() - .prepareNodesStats(randomDataNode) - .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true)) - .get(); - RemoteSegmentStats remoteSegmentStats = nodesStatsResponse.getNodes().get(0).getIndices().getSegments().getRemoteSegmentStats(); - assertEquals(cumulativeUploadsSucceeded, remoteSegmentStats.getUploadBytesSucceeded()); - assertEquals(cumulativeUploadsStarted, remoteSegmentStats.getUploadBytesStarted()); - assertEquals(cumulativeUploadsFailed, remoteSegmentStats.getUploadBytesFailed()); - assertEquals(max_bytes_lag, remoteSegmentStats.getMaxRefreshBytesLag()); - assertEquals(max_time_lag, remoteSegmentStats.getMaxRefreshTimeLag()); - } - - /** - * - Creates two indices with single primary shard and single replica - * - Index documents in both of them and forces a fresh for both - * - Polls the _remotestore/stats API for individual index level stats - * - Adds up requisite fields from the API output for both indices - * - Polls _nodes/stats and verifies that the total values at node level adds up - * to the values capture in the previous step - * - Repeats the above 3 steps for the second node - */ - public void testNodesStatsParityWithReplicaShards() throws Exception { - String firstIndex = INDEX_NAME + "1"; - String secondIndex = INDEX_NAME + "2"; - - createIndex(firstIndex, Settings.builder().put(remoteStoreIndexSettings(1, 1)).build()); - ensureGreen(firstIndex); - indexSingleDoc(firstIndex, true); - - // Create second index - createIndex(secondIndex, Settings.builder().put(remoteStoreIndexSettings(1, 1)).build()); - ensureGreen(secondIndex); - indexSingleDoc(secondIndex, true); - - assertBusy(() -> assertNodeStatsParityAcrossNodes(firstIndex, secondIndex), 15, TimeUnit.SECONDS); - } - - /** - * Ensures that node stats shows 0 values for dedicated cluster manager nodes - * since cluster manager nodes does not participate in indexing - */ - public void testZeroRemoteStatsOnNodesStatsForClusterManager() { - createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); - ensureGreen(INDEX_NAME); - indexSingleDoc(INDEX_NAME); - refresh(INDEX_NAME); - NodesStatsResponse nodesStatsResponseForClusterManager = client().admin() - .cluster() - .prepareNodesStats(internalCluster().getClusterManagerName()) - .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true)) - .get(); - assertTrue( - nodesStatsResponseForClusterManager.getNodes().get(0).getNode().isClusterManagerNode() - && !nodesStatsResponseForClusterManager.getNodes().get(0).getNode().isDataNode() - ); - assertZeroRemoteSegmentStats( - nodesStatsResponseForClusterManager.getNodes().get(0).getIndices().getSegments().getRemoteSegmentStats() - ); - NodesStatsResponse nodesStatsResponseForDataNode = client().admin() - .cluster() - .prepareNodesStats(primaryNodeName(INDEX_NAME)) 
- .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true)) - .get(); - assertTrue(nodesStatsResponseForDataNode.getNodes().get(0).getNode().isDataNode()); - RemoteSegmentStats remoteSegmentStats = nodesStatsResponseForDataNode.getNodes() - .get(0) - .getIndices() - .getSegments() - .getRemoteSegmentStats(); - assertTrue(remoteSegmentStats.getUploadBytesStarted() > 0); - assertTrue(remoteSegmentStats.getUploadBytesSucceeded() > 0); - } - - private void assertZeroRemoteSegmentStats(RemoteSegmentStats remoteSegmentStats) { - assertEquals(0, remoteSegmentStats.getUploadBytesStarted()); - assertEquals(0, remoteSegmentStats.getUploadBytesSucceeded()); - assertEquals(0, remoteSegmentStats.getUploadBytesFailed()); - assertEquals(0, remoteSegmentStats.getDownloadBytesStarted()); - assertEquals(0, remoteSegmentStats.getDownloadBytesSucceeded()); - assertEquals(0, remoteSegmentStats.getDownloadBytesFailed()); - assertEquals(0, remoteSegmentStats.getMaxRefreshBytesLag()); - assertEquals(0, remoteSegmentStats.getMaxRefreshTimeLag()); - } - - private static void assertNodeStatsParityAcrossNodes(String firstIndex, String secondIndex) { - for (String dataNode : internalCluster().getDataNodeNames()) { - long cumulativeUploadsSucceeded = 0, cumulativeUploadsStarted = 0, cumulativeUploadsFailed = 0; - long cumulativeDownloadsSucceeded = 0, cumulativeDownloadsStarted = 0, cumulativeDownloadsFailed = 0; - long max_bytes_lag = 0, max_time_lag = 0; - // Fetch upload stats - RemoteStoreStatsResponse remoteStoreStatsFirstIndex = client(dataNode).admin() - .cluster() - .prepareRemoteStoreStats(firstIndex, "0") - .setLocal(true) - .get(); - cumulativeUploadsSucceeded += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesSucceeded; - cumulativeUploadsStarted += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesStarted; - cumulativeUploadsFailed += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesFailed; - cumulativeDownloadsSucceeded += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0] - .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesSucceeded; - cumulativeDownloadsStarted += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0] - .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesStarted; - cumulativeDownloadsFailed += remoteStoreStatsFirstIndex.getRemoteStoreStats()[0] - .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesFailed; - max_bytes_lag = Math.max(max_bytes_lag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag); - max_time_lag = Math.max(max_time_lag, remoteStoreStatsFirstIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs); - - RemoteStoreStatsResponse remoteStoreStatsSecondIndex = client(dataNode).admin() - .cluster() - .prepareRemoteStoreStats(secondIndex, "0") - .setLocal(true) - .get(); - cumulativeUploadsSucceeded += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesSucceeded; - cumulativeUploadsStarted += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesStarted; - cumulativeUploadsFailed += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().uploadBytesFailed; - cumulativeDownloadsSucceeded += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0] - .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesSucceeded; - cumulativeDownloadsStarted += 
remoteStoreStatsSecondIndex.getRemoteStoreStats()[0] - .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesStarted; - cumulativeDownloadsFailed += remoteStoreStatsSecondIndex.getRemoteStoreStats()[0] - .getSegmentStats().directoryFileTransferTrackerStats.transferredBytesFailed; - max_bytes_lag = Math.max(max_bytes_lag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().bytesLag); - max_time_lag = Math.max(max_time_lag, remoteStoreStatsSecondIndex.getRemoteStoreStats()[0].getSegmentStats().refreshTimeLagMs); - - // Fetch nodes stats - NodesStatsResponse nodesStatsResponse = client().admin() - .cluster() - .prepareNodesStats(dataNode) - .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true)) - .get(); - RemoteSegmentStats remoteSegmentStats = nodesStatsResponse.getNodes().get(0).getIndices().getSegments().getRemoteSegmentStats(); - assertEquals(cumulativeUploadsSucceeded, remoteSegmentStats.getUploadBytesSucceeded()); - assertEquals(cumulativeUploadsStarted, remoteSegmentStats.getUploadBytesStarted()); - assertEquals(cumulativeUploadsFailed, remoteSegmentStats.getUploadBytesFailed()); - assertEquals(cumulativeDownloadsSucceeded, remoteSegmentStats.getDownloadBytesSucceeded()); - assertEquals(cumulativeDownloadsStarted, remoteSegmentStats.getDownloadBytesStarted()); - assertEquals(cumulativeDownloadsFailed, remoteSegmentStats.getDownloadBytesFailed()); - assertEquals(max_bytes_lag, remoteSegmentStats.getMaxRefreshBytesLag()); - assertEquals(max_time_lag, remoteSegmentStats.getMaxRefreshTimeLag()); - } - } -} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java similarity index 57% rename from server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java rename to server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java index d02c5bf54fbed..f19c9db7874db 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureAndResiliencyIT.java @@ -11,13 +11,20 @@ import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.action.admin.indices.flush.FlushResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AbstractAsyncTask; +import org.opensearch.common.util.concurrent.UncategorizedExecutionException; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.IndexService; import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; import org.opensearch.repositories.RepositoriesService; import org.opensearch.snapshots.mockstore.MockRepository; import org.opensearch.test.OpenSearchIntegTestCase; @@ -33,7 +40,7 
@@ import static org.opensearch.index.remote.RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class RemoteStoreBackpressureIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { +public class RemoteStoreBackpressureAndResiliencyIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { public void testWritesRejectedDueToConsecutiveFailureBreach() throws Exception { // Here the doc size of the request remains same throughout the test. After initial indexing, all remote store interactions // fail leading to consecutive failure limit getting exceeded and leading to rejections. @@ -49,7 +56,7 @@ public void testWritesRejectedDueToBytesLagBreach() throws Exception { public void testWritesRejectedDueToTimeLagBreach() throws Exception { // Initially indexing happens with doc size of 1KB, then all remote store interactions start failing. Now, the // indexing happens with doc size of 1 byte leading to time lag limit getting exceeded and leading to rejections. - validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 20, ByteSizeUnit.BYTES.toIntBytes(1), 15, "time_lag"); + validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 20, ByteSizeUnit.BYTES.toIntBytes(1), 3, "time_lag"); } private void validateBackpressure( @@ -112,7 +119,7 @@ private void validateBackpressure( stats = stats(); indexDocAndRefresh(initialSource, initialDocsToIndex); assertEquals(rejectionCount, stats.rejectionCount); - deleteRepo(); + cleanupRepo(); } private RemoteSegmentTransferTracker.Stats stats() { @@ -126,11 +133,13 @@ private RemoteSegmentTransferTracker.Stats stats() { return matches.get(0).getSegmentStats(); } - private void indexDocAndRefresh(BytesReference source, int iterations) { + private void indexDocAndRefresh(BytesReference source, int iterations) throws InterruptedException { for (int i = 0; i < iterations; i++) { client().prepareIndex(INDEX_NAME).setSource(source, MediaTypeRegistry.JSON).get(); refresh(INDEX_NAME); } + Thread.sleep(250); + client().prepareIndex(INDEX_NAME).setSource(source, MediaTypeRegistry.JSON).get(); } /** @@ -156,4 +165,98 @@ private String generateString(int sizeInBytes) { sb.append("}"); return sb.toString(); } + + /** + * Fixes Github#10398 + */ + public void testAsyncTrimTaskSucceeds() { + Path location = randomRepoPath().toAbsolutePath(); + String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE); + + logger.info("Increasing the frequency of async trim task to ensure it runs in background while indexing"); + IndexService indexService = internalCluster().getInstance(IndicesService.class, dataNodeName).iterator().next(); + ((AbstractAsyncTask) indexService.getTrimTranslogTask()).setInterval(TimeValue.timeValueMillis(100)); + + logger.info("--> Indexing data"); + indexData(randomIntBetween(2, 5), true); + logger.info("--> Indexing succeeded"); + + MockRepository translogRepo = (MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName) + .repository(TRANSLOG_REPOSITORY_NAME); + logger.info("--> Failing all remote store interaction"); + translogRepo.setRandomControlIOExceptionRate(1d); + + for (int i = 0; i < randomIntBetween(5, 10); i++) { + UncategorizedExecutionException exception = assertThrows(UncategorizedExecutionException.class, this::indexSingleDoc); + assertEquals("Failed execution", exception.getMessage()); + } + + translogRepo.setRandomControlIOExceptionRate(0d); + indexSingleDoc(); + logger.info("Indexed 
single doc successfully"); + } + + /** + * Fixes Github#10400 + */ + public void testSkipLoadGlobalCheckpointToReplicationTracker() { + Path location = randomRepoPath().toAbsolutePath(); + String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE); + + logger.info("--> Indexing data"); + indexData(randomIntBetween(1, 2), true); + logger.info("--> Indexing succeeded"); + + IndexService indexService = internalCluster().getInstance(IndicesService.class, dataNodeName).iterator().next(); + IndexShard indexShard = indexService.getShard(0); + indexShard.failShard("failing shard", null); + + ensureRed(INDEX_NAME); + + MockRepository translogRepo = (MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName) + .repository(TRANSLOG_REPOSITORY_NAME); + logger.info("--> Failing all remote store interaction"); + translogRepo.setRandomControlIOExceptionRate(1d); + client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + // CLuster stays red still as the remote interactions are still failing + ensureRed(INDEX_NAME); + + logger.info("Retrying to allocate failed shards"); + client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + // CLuster stays red still as the remote interactions are still failing + ensureRed(INDEX_NAME); + + logger.info("Stop failing all remote store interactions"); + translogRepo.setRandomControlIOExceptionRate(0d); + client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + ensureGreen(INDEX_NAME); + } + + public void testFlushDuringRemoteUploadFailures() { + Path location = randomRepoPath().toAbsolutePath(); + String dataNodeName = setup(location, 0d, "metadata", Long.MAX_VALUE); + + logger.info("--> Indexing data"); + indexData(randomIntBetween(1, 2), true); + logger.info("--> Indexing succeeded"); + ensureGreen(INDEX_NAME); + + MockRepository translogRepo = (MockRepository) internalCluster().getInstance(RepositoriesService.class, dataNodeName) + .repository(TRANSLOG_REPOSITORY_NAME); + logger.info("--> Failing all remote store interaction"); + translogRepo.setRandomControlIOExceptionRate(1d); + + Exception ex = assertThrows(UncategorizedExecutionException.class, () -> indexSingleDoc()); + assertEquals("Failed execution", ex.getMessage()); + + FlushResponse flushResponse = client().admin().indices().prepareFlush(INDEX_NAME).setForce(true).execute().actionGet(); + assertEquals(1, flushResponse.getFailedShards()); + ensureGreen(INDEX_NAME); + + logger.info("--> Stop failing all remote store interactions"); + translogRepo.setRandomControlIOExceptionRate(0d); + flushResponse = client().admin().indices().prepareFlush(INDEX_NAME).setForce(true).execute().actionGet(); + assertEquals(1, flushResponse.getSuccessfulShards()); + assertEquals(0, flushResponse.getFailedShards()); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index b627ddb713fbd..8b4981a15433a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -16,13 +16,20 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.action.support.WriteRequest; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import 
org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; @@ -34,27 +41,30 @@ import java.nio.file.attribute.BasicFileAttributes; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { protected static final String REPOSITORY_NAME = "test-remote-store-repo"; protected static final String REPOSITORY_2_NAME = "test-remote-store-repo-2"; protected static final int SHARD_COUNT = 1; - protected static final int REPLICA_COUNT = 1; + protected static int REPLICA_COUNT = 1; protected static final String TOTAL_OPERATIONS = "total-operations"; protected static final String REFRESHED_OR_FLUSHED_OPERATIONS = "refreshed-or-flushed-operations"; protected static final String MAX_SEQ_NO_TOTAL = "max-seq-no-total"; protected static final String MAX_SEQ_NO_REFRESHED_OR_FLUSHED = "max-seq-no-refreshed-or-flushed"; - protected Path absolutePath; - protected Path absolutePath2; + protected Path segmentRepoPath; + protected Path translogRepoPath; + protected boolean clusterSettingsSuppliedByTest = false; private final List documentKeys = List.of( randomAlphaOfLength(5), randomAlphaOfLength(5), @@ -108,19 +118,18 @@ protected boolean addMockInternalEngine() { @Override protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(remoteStoreClusterSettings(REPOSITORY_NAME, REPOSITORY_2_NAME, true)) - .build(); - } - - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - 
.put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .build(); + if (segmentRepoPath == null || translogRepoPath == null) { + segmentRepoPath = randomRepoPath().toAbsolutePath(); + translogRepoPath = randomRepoPath().toAbsolutePath(); + } + if (clusterSettingsSuppliedByTest) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).build(); + } else { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) + .build(); + } } public Settings indexSettings() { @@ -153,28 +162,119 @@ protected BulkResponse indexBulk(String indexName, int numDocs) { return client().bulk(bulkRequest).actionGet(); } - public static Settings remoteStoreClusterSettings(String segmentRepoName) { - return remoteStoreClusterSettings(segmentRepoName, segmentRepoName); + public static Settings remoteStoreClusterSettings(String name, Path path) { + return remoteStoreClusterSettings(name, path, name, path); } public static Settings remoteStoreClusterSettings( String segmentRepoName, + Path segmentRepoPath, + String segmentRepoType, String translogRepoName, - boolean randomizeSameRepoForRSSAndRTS + Path translogRepoPath, + String translogRepoType ) { - return remoteStoreClusterSettings( + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put( + buildRemoteStoreNodeAttributes( + segmentRepoName, + segmentRepoPath, + segmentRepoType, + translogRepoName, + translogRepoPath, + translogRepoType, + false + ) + ); + return settingsBuilder.build(); + } + + public static Settings remoteStoreClusterSettings( + String segmentRepoName, + Path segmentRepoPath, + String translogRepoName, + Path translogRepoPath + ) { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(buildRemoteStoreNodeAttributes(segmentRepoName, segmentRepoPath, translogRepoName, translogRepoPath, false)); + return settingsBuilder.build(); + } + + public static Settings buildRemoteStoreNodeAttributes( + String segmentRepoName, + Path segmentRepoPath, + String translogRepoName, + Path translogRepoPath, + boolean withRateLimiterAttributes + ) { + return buildRemoteStoreNodeAttributes( segmentRepoName, - randomizeSameRepoForRSSAndRTS ? (randomBoolean() ? translogRepoName : segmentRepoName) : translogRepoName + segmentRepoPath, + FsRepository.TYPE, + translogRepoName, + translogRepoPath, + FsRepository.TYPE, + withRateLimiterAttributes ); } - public static Settings remoteStoreClusterSettings(String segmentRepoName, String translogRepoName) { - return Settings.builder() - .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), segmentRepoName) - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), translogRepoName) - .build(); + public static Settings buildRemoteStoreNodeAttributes( + String segmentRepoName, + Path segmentRepoPath, + String segmentRepoType, + String translogRepoName, + Path translogRepoPath, + String translogRepoType, + boolean withRateLimiterAttributes + ) { + String segmentRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + segmentRepoName + ); + String segmentRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." 
+ REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + segmentRepoName + ); + String translogRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + translogRepoName + ); + String translogRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + translogRepoName + ); + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + segmentRepoName + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + segmentRepoName + ); + + Settings.Builder settings = Settings.builder() + .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) + .put(segmentRepoTypeAttributeKey, segmentRepoType) + .put(segmentRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) + .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName) + .put(translogRepoTypeAttributeKey, translogRepoType) + .put(translogRepoSettingsAttributeKeyPrefix + "location", translogRepoPath) + .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) + .put(stateRepoTypeAttributeKey, segmentRepoType) + .put(stateRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath); + + if (withRateLimiterAttributes) { + settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) + .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); + } + + return settings.build(); } private Settings defaultIndexSettings() { @@ -208,35 +308,62 @@ protected Settings remoteStoreIndexSettings(int numberOfReplicas, long totalFiel .build(); } - protected void putRepository(Path path) { - putRepository(path, REPOSITORY_NAME); + @After + public void teardown() { + clusterSettingsSuppliedByTest = false; + assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_NAME); + assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_2_NAME); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); + clusterAdmin().prepareCleanupRepository(REPOSITORY_2_NAME).get(); } - protected void putRepository(Path path, String repoName) { - assertAcked(clusterAdmin().preparePutRepository(repoName).setType("fs").setSettings(Settings.builder().put("location", path))); - } + public RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name) { + Map nodeAttributes = node.getAttributes(); + String type = nodeAttributes.get(String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, name)); - protected void setupRepo() { - setupRepo(true); - } + String settingsAttributeKeyPrefix = String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, name); + Map settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> node.getAttributes().get(key))); - protected void setupRepo(boolean startDedicatedClusterManager) { - if (startDedicatedClusterManager) { - internalCluster().startClusterManagerOnlyNode(); - } - absolutePath = randomRepoPath().toAbsolutePath(); - putRepository(absolutePath); - absolutePath2 = randomRepoPath().toAbsolutePath(); - putRepository(absolutePath2, REPOSITORY_2_NAME); + 
Settings.Builder settings = Settings.builder(); + settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue())); + settings.put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true); + + return new RepositoryMetadata(name, type, settings.build()); } - @After - public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_2_NAME)); + public void assertRemoteStoreRepositoryOnAllNodes(String repositoryName) { + RepositoriesMetadata repositories = internalCluster().getInstance(ClusterService.class, internalCluster().getNodeNames()[0]) + .state() + .metadata() + .custom(RepositoriesMetadata.TYPE); + RepositoryMetadata actualRepository = repositories.repository(repositoryName); + + final RepositoriesService repositoriesService = internalCluster().getClusterManagerNodeInstance(RepositoriesService.class); + final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repositoryName); + + for (String nodeName : internalCluster().getNodeNames()) { + ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nodeName); + DiscoveryNode node = clusterService.localNode(); + RepositoryMetadata expectedRepository = buildRepositoryMetadata(node, repositoryName); + + // Validate that all the restricted settings are intact on all the nodes. + repository.getRestrictedSystemRepositorySettings() + .stream() + .forEach( + setting -> assertEquals( + String.format(Locale.ROOT, "Restricted Settings mismatch [%s]", setting.getKey()), + setting.get(actualRepository.settings()), + setting.get(expectedRepository.settings()) + ) + ); + } } - public int getFileCount(Path path) throws Exception { + public static int getFileCount(Path path) throws Exception { final AtomicInteger filesExisting = new AtomicInteger(0); Files.walkFileTree(path, new SimpleFileVisitor<>() { @Override @@ -253,5 +380,4 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { return filesExisting.get(); } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java new file mode 100644 index 0000000000000..c61e2ec6e4f6c --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -0,0 +1,466 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.indices.datastream.DataStreamRolloverIT; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexTemplateMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; +import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.cluster.coordination.ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_READ_ONLY_SETTING; +import static org.opensearch.cluster.metadata.Metadata.CLUSTER_READ_ONLY_BLOCK; +import static org.opensearch.cluster.metadata.Metadata.SETTING_READ_ONLY_SETTING; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStoreClusterStateRestoreIT extends BaseRemoteStoreRestoreIT { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true).build(); + } + + private void addNewNodes(int dataNodeCount, int clusterManagerNodeCount) { + internalCluster().startNodes(dataNodeCount + clusterManagerNodeCount); + } + + private Map initialTestSetup(int shardCount, int replicaCount, int dataNodeCount, int clusterManagerNodeCount) { + prepareCluster(clusterManagerNodeCount, dataNodeCount, INDEX_NAME, replicaCount, shardCount); + Map indexStats = indexData(1, false, INDEX_NAME); + assertEquals(shardCount * (replicaCount + 1), getNumShards(INDEX_NAME).totalNumShards); + ensureGreen(INDEX_NAME); + return indexStats; + } + + private void resetCluster(int dataNodeCount, int clusterManagerNodeCount) { + internalCluster().stopAllNodes(); + internalCluster().startClusterManagerOnlyNodes(clusterManagerNodeCount); + internalCluster().startDataOnlyNodes(dataNodeCount); + } + + protected void verifyRedIndicesAndTriggerRestore(Map indexStats, String indexName, boolean indexMoreDocs) + throws Exception { + ensureRed(indexName); + restore(false, indexName); + verifyRestoredData(indexStats, indexName, indexMoreDocs); + } + + public void 
testFullClusterRestore() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // Step - 1 index some data to generate files in remote directory + Map<String, Long> indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); + + // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata + resetCluster(dataNodeCount, clusterManagerNodeCount); + + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; + + // Step - 3 validate cluster state restored + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + validateMetadata(List.of(INDEX_NAME)); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); + + } + + /** + * This test scenario covers the case where, right after the remote state is restored and persisted to disk via LucenePersistedState, the full cluster restarts. + * This is a special case for remote state because at this point the cluster uuid in the restored state is still ClusterState.UNKNOWN_UUID when we persist it to disk. + * After restart the local disk state will be read but should be overridden again with the remote state. + * + * 1. Form a cluster and index a few docs + * 2. Replace all nodes to remove all local disk state + * 3. Start a cluster manager node without correct seeding to ensure the local disk state is written with cluster uuid ClusterState.UNKNOWN_UUID but with the remote restored Metadata + * 4. Restart the cluster manager node with correct seeding. + * 5. After restart the cluster manager picks up the local disk state, which has the same Metadata as remote, but the cluster uuid is still ClusterState.UNKNOWN_UUID + * 6. The cluster manager will try to restore from remote again. + * 7. Metadata loaded from the local disk state will be overridden with remote Metadata and no conflict should arise. + * 8. Add data nodes to recover index data + * 9. Verify Metadata and index data is restored. + */ + public void testFullClusterRestoreDoesntFailWithConflictingLocalState() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // index some data to generate files in remote directory + Map<String, Long> indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); + + // stop all nodes + internalCluster().stopAllNodes(); + + // start a cluster manager node with no cluster manager seeding.
+ // This should fail with IllegalStateException as cluster manager fails to form without any initial seed + assertThrows( + IllegalStateException.class, + () -> internalCluster().startClusterManagerOnlyNodes( + clusterManagerNodeCount, + Settings.builder() + .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey()) // disable seeding during bootstrapping + .build() + ) + ); + + // verify cluster manager not elected + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert Objects.equals(newClusterUUID, ClusterState.UNKNOWN_UUID) + : "Disabling Cluster manager seeding failed. cluster uuid is not unknown"; + + // restart cluster manager with correct seed + internalCluster().fullRestart(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) { + return Settings.builder() + .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), nodeName) // Seed with correct Cluster Manager node + .build(); + } + }); + + // validate new cluster state formed + newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, ClusterState.UNKNOWN_UUID) : "cluster restart not successful. cluster uuid is still unknown"; + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; + + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + validateMetadata(List.of(INDEX_NAME)); + + // start data nodes to trigger index data recovery + internalCluster().startDataOnlyNodes(dataNodeCount); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); + } + + public void testFullClusterRestoreMultipleIndices() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // Step - 1 index some data to generate files in remote directory + Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); + + String secondIndexName = INDEX_NAME + "-2"; + createIndex(secondIndexName, remoteStoreIndexSettings(replicaCount, shardCount + 1)); + Map indexStats2 = indexData(1, false, secondIndexName); + assertEquals((shardCount + 1) * (replicaCount + 1), getNumShards(secondIndexName).totalNumShards); + ensureGreen(secondIndexName); + updateIndexBlock(true, secondIndexName); + + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); + + // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata + resetCluster(dataNodeCount, clusterManagerNodeCount); + + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; + + // Step - 3 validate cluster state restored + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. 
previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + validateMetadata(List.of(INDEX_NAME, secondIndexName)); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, false); + verifyRedIndicesAndTriggerRestore(indexStats2, secondIndexName, false); + assertTrue(INDEX_READ_ONLY_SETTING.get(clusterService().state().metadata().index(secondIndexName).getSettings())); + assertThrows(ClusterBlockException.class, () -> indexSingleDoc(secondIndexName)); + // Test is complete + + // Remove the block to ensure proper cleanup + updateIndexBlock(false, secondIndexName); + } + + public void testFullClusterRestoreManifestFilePointsToInvalidIndexMetadataPathThrowsException() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // Step - 1 index some data to generate files in remote directory + initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); + + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + String clusterName = clusterService().state().getClusterName().value(); + + // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata + internalCluster().stopAllNodes(); + // Step - 3 Delete index metadata file in remote + try { + Files.move( + segmentRepoPath.resolve( + RemoteClusterStateService.encodeString(clusterName) + "/cluster-state/" + prevClusterUUID + "/index" + ), + segmentRepoPath.resolve("cluster-state/") + ); + } catch (IOException e) { + throw new RuntimeException(e); + } + assertThrows(IllegalStateException.class, () -> addNewNodes(dataNodeCount, clusterManagerNodeCount)); + // Test is complete + + // Starting a node without remote state to ensure test cleanup + internalCluster().startNode(Settings.builder().put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false).build()); + } + + public void testRemoteStateFullRestart() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 3; + + Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, clusterManagerNodeCount); + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); + // Delete index metadata file in remote + try { + Files.move( + segmentRepoPath.resolve( + RemoteClusterStateService.encodeString(clusterService().state().getClusterName().value()) + + "/cluster-state/" + + prevClusterUUID + + "/manifest" + ), + segmentRepoPath.resolve("cluster-state/") + ); + } catch (IOException e) { + throw new RuntimeException(e); + } + internalCluster().fullRestart(); + ensureGreen(INDEX_NAME); + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert Objects.equals(newClusterUUID, prevClusterUUID) : "Full restart not successful. cluster uuid has changed"; + + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. 
previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + validateCurrentMetadata(); + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true); + } + + private void validateMetadata(List indexNames) { + assertEquals(clusterService().state().metadata().indices().size(), indexNames.size()); + for (String indexName : indexNames) { + assertTrue(clusterService().state().metadata().hasIndex(indexName)); + } + } + + private void validateCurrentMetadata() throws Exception { + RemoteClusterStateService remoteClusterStateService = internalCluster().getInstance( + RemoteClusterStateService.class, + internalCluster().getClusterManagerName() + ); + assertBusy(() -> { + ClusterMetadataManifest manifest = remoteClusterStateService.getLatestClusterMetadataManifest( + getClusterState().getClusterName().value(), + getClusterState().metadata().clusterUUID() + ).get(); + ClusterState clusterState = getClusterState(); + Metadata currentMetadata = clusterState.metadata(); + assertEquals(currentMetadata.indices().size(), manifest.getIndices().size()); + assertEquals(currentMetadata.coordinationMetadata().term(), manifest.getClusterTerm()); + assertEquals(clusterState.version(), manifest.getStateVersion()); + assertEquals(clusterState.stateUUID(), manifest.getStateUUID()); + assertEquals(currentMetadata.clusterUUIDCommitted(), manifest.isClusterUUIDCommitted()); + for (UploadedIndexMetadata uploadedIndexMetadata : manifest.getIndices()) { + IndexMetadata currentIndexMetadata = currentMetadata.index(uploadedIndexMetadata.getIndexName()); + assertEquals(currentIndexMetadata.getIndex().getUUID(), uploadedIndexMetadata.getIndexUUID()); + } + }); + } + + public void testDataStreamPostRemoteStateRestore() throws Exception { + new DataStreamRolloverIT() { + protected boolean triggerRemoteStateRestore() { + return true; + } + }.testDataStreamRollover(); + } + + public void testFullClusterRestoreGlobalMetadata() throws Exception { + int shardCount = randomIntBetween(1, 2); + int replicaCount = 1; + int dataNodeCount = shardCount * (replicaCount + 1); + int clusterManagerNodeCount = 1; + + // Step - 1 index some data to generate files in remote directory + Map indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1); + String prevClusterUUID = clusterService().state().metadata().clusterUUID(); + long prevClusterStateVersion = clusterService().state().version(); + + // Create global metadata - register a custom repo + Path repoPath = registerCustomRepository(); + + // Create global metadata - persistent settings + updatePersistentSettings(Settings.builder().put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), 34).build()); + + // Create global metadata - index template + putIndexTemplate(); + + // Create global metadata - Put cluster block + addClusterLevelReadOnlyBlock(); + + // Step - 2 Replace all nodes in the cluster with new nodes. This ensures new cluster state doesn't have previous index metadata + resetCluster(dataNodeCount, clusterManagerNodeCount); + + String newClusterUUID = clusterService().state().metadata().clusterUUID(); + assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same"; + + // Step - 3 validate cluster state restored + long newClusterStateVersion = clusterService().state().version(); + assert prevClusterStateVersion < newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. 
previousClusterVersion: [%s] is greater than current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + + validateCurrentMetadata(); + assertEquals(Integer.valueOf(34), SETTING_CLUSTER_MAX_SHARDS_PER_NODE.get(clusterService().state().metadata().settings())); + assertEquals(true, SETTING_READ_ONLY_SETTING.get(clusterService().state().metadata().settings())); + assertTrue(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK)); + // Remove the cluster read only block to ensure proper cleanup + updatePersistentSettings(Settings.builder().put(SETTING_READ_ONLY_SETTING.getKey(), false).build()); + assertFalse(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK)); + + verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, false); + + // validate global metadata restored + verifyRestoredRepositories(repoPath); + verifyRestoredIndexTemplate(); + } + + private Path registerCustomRepository() { + Path path = randomRepoPath(); + assertAcked( + client().admin() + .cluster() + .preparePutRepository("custom-repo") + .setType("fs") + .setSettings(Settings.builder().put("location", path).put("compress", false)) + .get() + ); + return path; + } + + private void verifyRestoredRepositories(Path repoPath) { + RepositoriesMetadata repositoriesMetadata = clusterService().state().metadata().custom(RepositoriesMetadata.TYPE); + assertEquals(3, repositoriesMetadata.repositories().size()); // includes the two remote store repos as well + assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_NAME).settings())); + assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_2_NAME).settings())); + assertEquals("fs", repositoriesMetadata.repository("custom-repo").type()); + assertEquals( + Settings.builder().put("location", repoPath).put("compress", false).build(), + repositoriesMetadata.repository("custom-repo").settings() + ); + + // repo cleanup post verification + clusterAdmin().prepareDeleteRepository("custom-repo").get(); + } + + private void addClusterLevelReadOnlyBlock() throws InterruptedException, ExecutionException { + updatePersistentSettings(Settings.builder().put(SETTING_READ_ONLY_SETTING.getKey(), true).build()); + assertTrue(clusterService().state().blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK)); + } + + private void updatePersistentSettings(Settings settings) throws ExecutionException, InterruptedException { + ClusterUpdateSettingsRequest resetRequest = new ClusterUpdateSettingsRequest(); + resetRequest.persistentSettings(settings); + assertAcked(client().admin().cluster().updateSettings(resetRequest).get()); + } + + private void verifyRestoredIndexTemplate() { + Map<String, IndexTemplateMetadata> indexTemplateMetadataMap = clusterService().state().metadata().templates(); + assertEquals(1, indexTemplateMetadataMap.size()); + assertEquals(Arrays.asList("pattern-1", "log-*"), indexTemplateMetadataMap.get("my-template").patterns()); + assertEquals( + Settings.builder() + .put("index.number_of_shards", 3) + .put("index.number_of_replicas", 1) + .build(), + indexTemplateMetadataMap.get("my-template").settings() + ); + } + + private static void putIndexTemplate() { + PutIndexTemplateRequest request = new PutIndexTemplateRequest("my-template"); + request.patterns(Arrays.asList("pattern-1", "log-*")); + + request.settings( + Settings.builder() + .put("index.number_of_shards", 3) + .put("index.number_of_replicas", 1) + ); + 
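+ // Editor's sketch (hypothetical usage, not part of the original change): once this + // template is stored, any new index whose name matches "pattern-1" or "log-*" picks up + // the 3-shard / 1-replica settings automatically, e.g.: + // createIndex("log-2023-10-01"); + // assertEquals("3", client().admin().indices().prepareGetSettings("log-2023-10-01") + // .get().getSetting("log-2023-10-01", "index.number_of_shards"));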
assertTrue(client().admin().indices().putTemplate(request).actionGet().isAcknowledged()); + } + + private static void updateIndexBlock(boolean value, String secondIndexName) throws InterruptedException, ExecutionException { + assertAcked( + client().admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(Settings.builder().put(INDEX_READ_ONLY_SETTING.getKey(), value).build(), secondIndexName) + ) + .get() + ); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java index abc0f35d48eab..0bcde4b44c734 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java @@ -16,7 +16,6 @@ import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; -import org.junit.Before; import java.io.IOException; import java.util.ArrayList; @@ -29,7 +28,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 3) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreForceMergeIT extends RemoteStoreBaseIntegTestCase { private static final String INDEX_NAME = "remote-store-test-idx-1"; @@ -41,11 +40,6 @@ protected Collection> nodePlugins() { return Arrays.asList(MockTransportService.TestPlugin.class); } - @Before - public void setup() { - setupRepo(); - } - @Override public Settings indexSettings() { return remoteStoreIndexSettings(0); @@ -97,6 +91,7 @@ private void verifyRestoredData(Map indexStats, long deletedDocs) private void testRestoreWithMergeFlow(int numberOfIterations, boolean invokeFlush, boolean flushAfterMerge, long deletedDocs) throws IOException { + internalCluster().startNodes(3); createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); ensureYellowAndNoInitializingShards(INDEX_NAME); ensureGreen(INDEX_NAME); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 87ec515ffe740..b3b4f8e10fd31 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -8,55 +8,69 @@ package org.opensearch.remotestore; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.admin.indices.recovery.RecoveryResponse; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.opensearch.action.index.IndexResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import 
org.opensearch.common.util.concurrent.BufferedAsyncIOProcessor; +import org.opensearch.core.index.Index; +import org.opensearch.index.IndexService; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.translog.Translog.Durability; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.hamcrest.MatcherAssert; -import org.junit.Before; import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; +import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.index.shard.RemoteStoreRefreshListener.LAST_N_METADATA_FILES_TO_KEEP; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.comparesEqualTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.oneOf; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 0) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreIT extends RemoteStoreBaseIntegTestCase { - private static final String INDEX_NAME = "remote-store-test-idx-1"; + protected final String INDEX_NAME = "remote-store-test-idx-1"; @Override protected Collection> nodePlugins() { return Arrays.asList(MockTransportService.TestPlugin.class); } - @Before - public void setup() { - setupRepo(); - } - @Override public Settings indexSettings() { return remoteStoreIndexSettings(0); } private void testPeerRecovery(int numberOfIterations, boolean invokeFlush) throws Exception { - internalCluster().startDataOnlyNodes(3); + internalCluster().startNodes(3); createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); ensureYellowAndNoInitializingShards(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -108,6 +122,16 @@ public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogFlush() throws Excep testPeerRecovery(randomIntBetween(2, 5), true); } + public void testPeerRecoveryWithLowActivityTimeout() throws Exception { + ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings( + Settings.builder() + .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "20kb") + .put(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(), "1s") + ); + internalCluster().client().admin().cluster().updateSettings(req).get(); + testPeerRecovery(randomIntBetween(2, 5), true); + } + public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogNoDataRefresh() throws Exception { testPeerRecovery(1, false); } @@ -117,7 +141,7 @@ public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogRefresh() throws Exc } private void verifyRemoteStoreCleanup() throws Exception { - internalCluster().startDataOnlyNodes(3); + 
internalCluster().startNodes(3); createIndex(INDEX_NAME, remoteStoreIndexSettings(1)); indexData(5, randomBoolean(), INDEX_NAME); @@ -126,7 +150,7 @@ private void verifyRemoteStoreCleanup() throws Exception { .prepareGetSettings(INDEX_NAME) .get() .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(absolutePath), indexUUID); + Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID); assertTrue(getFileCount(indexPath) > 0); assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); // Delete is async. Give time for it @@ -143,7 +167,7 @@ public void testRemoteTranslogCleanup() throws Exception { } public void testStaleCommitDeletionWithInvokeFlush() throws Exception { - internalCluster().startDataOnlyNodes(1); + internalCluster().startNode(); createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); int numberOfIterations = randomIntBetween(5, 15); indexData(numberOfIterations, true, INDEX_NAME); @@ -152,7 +176,7 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception { .prepareGetSettings(INDEX_NAME) .get() .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(absolutePath), indexUUID, "/0/segments/metadata"); + Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); // Delete is async. assertBusy(() -> { int actualFileCount = getFileCount(indexPath); @@ -170,7 +194,7 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception { } public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { - internalCluster().startDataOnlyNodes(1); + internalCluster().startNode(); createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); int numberOfIterations = randomIntBetween(5, 15); indexData(numberOfIterations, false, INDEX_NAME); @@ -179,9 +203,333 @@ public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { .prepareGetSettings(INDEX_NAME) .get() .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(absolutePath), indexUUID, "/0/segments/metadata"); + Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); int actualFileCount = getFileCount(indexPath); // We also allow (numberOfIterations + 1) as index creation also triggers refresh. MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations - 1, numberOfIterations, numberOfIterations + 1))); } + + /** + * Tests that when the index setting is not passed during index creation, the buffer interval picked up is the cluster + * default. 
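+ * (Editor's note: the "cluster default" here is the dynamic setting + * cluster.remote_store.translog.buffer_interval; when that is also unset, the hard-coded + * IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL applies, which is what the first + * assertion below checks.)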
+ */ + public void testDefaultBufferInterval() throws ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + String clusterManagerName = internalCluster().getClusterManagerName(); + String dataNode = internalCluster().startDataOnlyNodes(1).get(0); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + assertClusterRemoteBufferInterval(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, dataNode); + + IndexShard indexShard = getIndexShard(dataNode); + assertTrue(indexShard.getTranslogSyncProcessor() instanceof BufferedAsyncIOProcessor); + assertBufferInterval(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, indexShard); + + // Next, we change the default buffer interval and the same should reflect in the buffer interval of the index created + TimeValue clusterBufferInterval = TimeValue.timeValueSeconds(randomIntBetween(100, 200)); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), clusterBufferInterval)) + .get(); + assertBufferInterval(clusterBufferInterval, indexShard); + clearClusterBufferIntervalSetting(clusterManagerName); + } + + /** + * This tests multiple cases where the index setting is passed during the index creation with multiple combinations + * with and without cluster default. + */ + public void testOverriddenBufferInterval() throws ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + String clusterManagerName = internalCluster().getClusterManagerName(); + String dataNode = internalCluster().startDataOnlyNodes(1).get(0); + + TimeValue bufferInterval = TimeValue.timeValueSeconds(randomIntBetween(0, 100)); + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), bufferInterval) + .build(); + createIndex(INDEX_NAME, indexSettings); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + IndexShard indexShard = getIndexShard(dataNode); + assertTrue(indexShard.getTranslogSyncProcessor() instanceof BufferedAsyncIOProcessor); + assertBufferInterval(bufferInterval, indexShard); + + // Set the cluster default with a different value, validate that the buffer interval is still the overridden value + TimeValue clusterBufferInterval = TimeValue.timeValueSeconds(randomIntBetween(100, 200)); + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), clusterBufferInterval)) + .get(); + assertBufferInterval(bufferInterval, indexShard); + + // Set the index setting (index.remote_store.translog.buffer_interval) with a different value and validate that + // the buffer interval is updated + bufferInterval = TimeValue.timeValueSeconds(bufferInterval.seconds() + randomIntBetween(1, 100)); + client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), bufferInterval) + ) + ) + .get(); + assertBufferInterval(bufferInterval, indexShard); + + // Set the index setting (index.remote_store.translog.buffer_interval) with null and validate the buffer interval + // which will be the cluster default now. 
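+ // Editor's sketch of the precedence this test exercises (illustrative pseudo-Java, not + // part of the original change): + // TimeValue effective = indexIntervalSet ? indexInterval + // : clusterIntervalSet ? clusterInterval + // : IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL;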
+ client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().putNull(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey()) + ) + ) + .get(); + assertBufferInterval(clusterBufferInterval, indexShard); + clearClusterBufferIntervalSetting(clusterManagerName); + } + + /** + * This tests validation which kicks in during index creation failing creation if the value is less than minimum allowed value. + */ + public void testOverriddenBufferIntervalValidation() { + internalCluster().startClusterManagerOnlyNode(); + TimeValue bufferInterval = TimeValue.timeValueSeconds(-1); + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), bufferInterval) + .build(); + IllegalArgumentException exceptionDuringCreateIndex = assertThrows( + IllegalArgumentException.class, + () -> createIndex(INDEX_NAME, indexSettings) + ); + assertEquals( + "failed to parse value [-1] for setting [index.remote_store.translog.buffer_interval], must be >= [0ms]", + exceptionDuringCreateIndex.getMessage() + ); + } + + /** + * This tests validation of the cluster setting when being set. + */ + public void testClusterBufferIntervalValidation() { + String clusterManagerName = internalCluster().startClusterManagerOnlyNode(); + IllegalArgumentException exception = assertThrows( + IllegalArgumentException.class, + () -> client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(-1)) + ) + .get() + ); + assertEquals( + "failed to parse value [-1] for setting [cluster.remote_store.translog.buffer_interval], must be >= [0ms]", + exception.getMessage() + ); + } + + public void testRequestDurabilityWhenRestrictSettingExplicitFalse() throws ExecutionException, InterruptedException { + // Explicit node settings and request durability + testRestrictSettingFalse(true, Durability.REQUEST); + } + + public void testAsyncDurabilityWhenRestrictSettingExplicitFalse() throws ExecutionException, InterruptedException { + // Explicit node settings and async durability + testRestrictSettingFalse(true, Durability.ASYNC); + } + + public void testRequestDurabilityWhenRestrictSettingImplicitFalse() throws ExecutionException, InterruptedException { + // No node settings and request durability + testRestrictSettingFalse(false, Durability.REQUEST); + } + + public void testAsyncDurabilityWhenRestrictSettingImplicitFalse() throws ExecutionException, InterruptedException { + // No node settings and async durability + testRestrictSettingFalse(false, Durability.ASYNC); + } + + private void testRestrictSettingFalse(boolean setRestrictFalse, Durability durability) throws ExecutionException, InterruptedException { + String clusterManagerName; + if (setRestrictFalse) { + clusterManagerName = internalCluster().startClusterManagerOnlyNode( + Settings.builder().put(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), false).build() + ); + } else { + clusterManagerName = internalCluster().startClusterManagerOnlyNode(); + } + String dataNode = internalCluster().startDataOnlyNodes(1).get(0); + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability) + .build(); + createIndex(INDEX_NAME, indexSettings); + IndexShard indexShard = 
getIndexShard(dataNode); + assertEquals(durability, indexShard.indexSettings().getTranslogDurability()); + + durability = randomFrom(Durability.values()); + client(clusterManagerName).admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability) + ) + ) + .get(); + assertEquals(durability, indexShard.indexSettings().getTranslogDurability()); + } + + public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() throws ExecutionException, InterruptedException { + String expectedExceptionMsg = + "index setting [index.translog.durability=async] is not allowed as cluster setting [cluster.remote_store.index.restrict.async-durability=true]"; + String clusterManagerName = internalCluster().startClusterManagerOnlyNode( + Settings.builder().put(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), true).build() + ); + String dataNode = internalCluster().startDataOnlyNodes(1).get(0); + + // Case 1 - Test create index fails + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Durability.ASYNC) + .build(); + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, indexSettings)); + assertEquals(expectedExceptionMsg, exception.getMessage()); + + // Case 2 - Test update index fails + createIndex(INDEX_NAME); + IndexShard indexShard = getIndexShard(dataNode); + assertEquals(Durability.REQUEST, indexShard.indexSettings().getTranslogDurability()); + exception = assertThrows( + IllegalArgumentException.class, + () -> client(clusterManagerName).admin() + .indices() + .updateSettings(new UpdateSettingsRequest(INDEX_NAME).settings(indexSettings)) + .actionGet() + ); + assertEquals(expectedExceptionMsg, exception.getMessage()); + } + + private IndexShard getIndexShard(String dataNode) throws ExecutionException, InterruptedException { + String clusterManagerName = internalCluster().getClusterManagerName(); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode); + GetIndexResponse getIndexResponse = client(clusterManagerName).admin().indices().getIndex(new GetIndexRequest()).get(); + String uuid = getIndexResponse.getSettings().get(INDEX_NAME).get(IndexMetadata.SETTING_INDEX_UUID); + IndexService indexService = indicesService.indexService(new Index(INDEX_NAME, uuid)); + return indexService.getShard(0); + } + + private void assertClusterRemoteBufferInterval(TimeValue expectedBufferInterval, String dataNode) { + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode); + assertEquals(expectedBufferInterval, indicesService.getClusterRemoteTranslogBufferInterval()); + } + + private void assertBufferInterval(TimeValue expectedBufferInterval, IndexShard indexShard) { + assertEquals( + expectedBufferInterval, + ((BufferedAsyncIOProcessor) indexShard.getTranslogSyncProcessor()).getBufferIntervalSupplier().get() + ); + } + + private void clearClusterBufferIntervalSetting(String clusterManagerName) { + client(clusterManagerName).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey())) + .get(); + } + + public void testRestoreSnapshotToIndexWithSameNameDifferentUUID() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + List dataNodes = 
internalCluster().startDataOnlyNodes(2); + + Path absolutePath = randomRepoPath().toAbsolutePath(); + assertAcked( + clusterAdmin().preparePutRepository("test-repo").setType("fs").setSettings(Settings.builder().put("location", absolutePath)) + ); + + logger.info("--> Create index and ingest 50 docs"); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1)); + indexBulk(INDEX_NAME, 50); + flushAndRefresh(INDEX_NAME); + + String originalIndexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + assertNotNull(originalIndexUUID); + assertNotEquals(IndexMetadata.INDEX_UUID_NA_VALUE, originalIndexUUID); + + ensureGreen(); + + logger.info("--> take a snapshot"); + client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setIndices(INDEX_NAME).setWaitForCompletion(true).get(); + + logger.info("--> wipe all indices"); + cluster().wipeIndices(INDEX_NAME); + + logger.info("--> Create index with the same name, different UUID"); + assertAcked( + prepareCreate(INDEX_NAME).setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 1)) + ); + + ensureGreen(TimeValue.timeValueSeconds(30), INDEX_NAME); + + String newIndexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + assertNotNull(newIndexUUID); + assertNotEquals(IndexMetadata.INDEX_UUID_NA_VALUE, newIndexUUID); + assertNotEquals(newIndexUUID, originalIndexUUID); + + logger.info("--> close index"); + client().admin().indices().prepareClose(INDEX_NAME).get(); + + logger.info("--> restore all indices from the snapshot"); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .execute() + .actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + + flushAndRefresh(INDEX_NAME); + + ensureGreen(INDEX_NAME); + assertBusy(() -> { + assertHitCount(client(dataNodes.get(0)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); + assertHitCount(client(dataNodes.get(1)).prepareSearch(INDEX_NAME).setSize(0).get(), 50); + }); + } + + public void testNoSearchIdleForAnyReplicaCount() throws ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + String primaryShardNode = internalCluster().startDataOnlyNodes(1).get(0); + + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(INDEX_NAME); + IndexShard indexShard = getIndexShard(primaryShardNode); + assertFalse(indexShard.isSearchIdleSupported()); + + String replicaShardNode = internalCluster().startDataOnlyNodes(1).get(0); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + ensureGreen(INDEX_NAME); + assertFalse(indexShard.isSearchIdleSupported()); + + indexShard = getIndexShard(replicaShardNode); + assertFalse(indexShard.isSearchIdleSupported()); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java index b97e93f323fb2..acdb21d072320 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java @@ -28,9 +28,13 @@ public class RemoteStoreRefreshListenerIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { public void testRemoteRefreshRetryOnFailure() throws Exception { - Path location = randomRepoPath().toAbsolutePath(); setup(location, randomDoubleBetween(0.1, 0.15, true), "metadata", 10L); + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), false)) + .get(); // Here we are having flush/refresh after each iteration of indexing. However, the refresh will not always succeed // due to IOExceptions that are thrown while doing uploadBlobs. @@ -56,7 +60,7 @@ public void testRemoteRefreshRetryOnFailure() throws Exception { logger.info("Local files = {}, Repo files = {}", sortedFilesInLocal, sortedFilesInRepo); assertTrue(filesInRepo.containsAll(filesInLocal)); }, 90, TimeUnit.SECONDS); - deleteRepo(); + cleanupRepo(); } public void testRemoteRefreshSegmentPressureSettingChanged() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java new file mode 100644 index 0000000000000..ef2dcf3217df6 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java @@ -0,0 +1,184 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesAction; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.disruption.NetworkDisruption; +import org.opensearch.test.transport.MockTransportService; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStoreRepositoryRegistrationIT extends RemoteStoreBaseIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + + public void testSingleNodeClusterRepositoryRegistration() throws Exception { + internalCluster().startNode(); + } + + public void testMultiNodeClusterRepositoryRegistration() throws Exception { + internalCluster().startNodes(3); + } + + public void 
testMultiNodeClusterRepositoryRegistrationWithMultipleClusterManager() throws Exception { + internalCluster().startClusterManagerOnlyNodes(3); + internalCluster().startNodes(3); + } + + public void testMultiNodeClusterActiveClusterManagerShutDown() throws Exception { + internalCluster().startNodes(3); + internalCluster().stopCurrentClusterManagerNode(); + ensureStableCluster(2); + } + + public void testMultiNodeClusterActiveMClusterManagerRestart() throws Exception { + internalCluster().startNodes(3); + String clusterManagerNodeName = internalCluster().getClusterManagerName(); + internalCluster().restartNode(clusterManagerNodeName); + ensureStableCluster(3); + } + + public void testMultiNodeClusterRandomNodeRestart() throws Exception { + internalCluster().startNodes(3); + internalCluster().restartRandomDataNode(); + ensureStableCluster(3); + } + + public void testMultiNodeClusterActiveClusterManagerRecoverNetworkIsolation() { + internalCluster().startClusterManagerOnlyNodes(3); + String dataNode = internalCluster().startNode(); + + NetworkDisruption partition = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT); + internalCluster().setDisruptionScheme(partition); + + partition.startDisrupting(); + ensureStableCluster(3, dataNode); + partition.stopDisrupting(); + + ensureStableCluster(4); + + internalCluster().clearDisruptionScheme(); + } + + public void testMultiNodeClusterRandomNodeRecoverNetworkIsolation() { + Set nodesInOneSide = internalCluster().startNodes(3).stream().collect(Collectors.toCollection(HashSet::new)); + Set nodesInAnotherSide = internalCluster().startNodes(3).stream().collect(Collectors.toCollection(HashSet::new)); + ensureStableCluster(6); + + NetworkDisruption networkDisruption = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInAnotherSide), + NetworkDisruption.DISCONNECT + ); + internalCluster().setDisruptionScheme(networkDisruption); + + networkDisruption.startDisrupting(); + ensureStableCluster(3, nodesInOneSide.stream().findAny().get()); + networkDisruption.stopDisrupting(); + + ensureStableCluster(6); + + internalCluster().clearDisruptionScheme(); + } + + public void testMultiNodeClusterRandomNodeRecoverNetworkIsolationPostNonRestrictedSettingsUpdate() { + Set nodesInOneSide = internalCluster().startNodes(3).stream().collect(Collectors.toCollection(HashSet::new)); + Set nodesInAnotherSide = internalCluster().startNodes(3).stream().collect(Collectors.toCollection(HashSet::new)); + ensureStableCluster(6); + + NetworkDisruption networkDisruption = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInAnotherSide), + NetworkDisruption.DISCONNECT + ); + internalCluster().setDisruptionScheme(networkDisruption); + + networkDisruption.startDisrupting(); + + final Client client = client(nodesInOneSide.iterator().next()); + RepositoryMetadata repositoryMetadata = client.admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME) + .get() + .repositories() + .get(0); + Settings.Builder updatedSettings = Settings.builder().put(repositoryMetadata.settings()).put("chunk_size", new ByteSizeValue(20)); + updatedSettings.remove("system_repository"); + + client.admin() + .cluster() + .preparePutRepository(repositoryMetadata.name()) + .setType(repositoryMetadata.type()) + .setSettings(updatedSettings) + .get(); + + ensureStableCluster(3, nodesInOneSide.stream().findAny().get()); + networkDisruption.stopDisrupting(); + + ensureStableCluster(6); + + internalCluster().clearDisruptionScheme(); + } + + public 
void testNodeRestartPostNonRestrictedSettingsUpdate() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startNodes(3); + + final Client client = client(); + RepositoryMetadata repositoryMetadata = client.admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME) + .get() + .repositories() + .get(0); + Settings.Builder updatedSettings = Settings.builder().put(repositoryMetadata.settings()).put("chunk_size", new ByteSizeValue(20)); + updatedSettings.remove("system_repository"); + + client.admin() + .cluster() + .preparePutRepository(repositoryMetadata.name()) + .setType(repositoryMetadata.type()) + .setSettings(updatedSettings) + .get(); + + internalCluster().restartRandomDataNode(); + + ensureStableCluster(4); + } + + public void testSystemRepositorySettingIsHiddenForGetRepositoriesRequest() throws IOException { + GetRepositoriesRequest request = new GetRepositoriesRequest(new String[] { REPOSITORY_NAME }); + GetRepositoriesResponse repositoriesResponse = client().execute(GetRepositoriesAction.INSTANCE, request).actionGet(); + XContentBuilder builder = MediaTypeRegistry.contentBuilder(randomFrom(XContentType.JSON)); + XContentBuilder xContentBuilder = repositoriesResponse.toXContent(builder, ToXContent.EMPTY_PARAMS); + repositoriesResponse = GetRepositoriesResponse.fromXContent(createParser(xContentBuilder)); + assertEquals(false, SYSTEM_REPOSITORY_SETTING.get(repositoriesResponse.repositories().get(0).settings())); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java index e9d8933961073..7626e3dba6424 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java @@ -10,92 +10,40 @@ import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse; -import org.opensearch.action.index.IndexResponse; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; -import org.opensearch.plugins.Plugin; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.transport.MockTransportService; -import org.junit.Before; import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; +import java.nio.file.Path; import java.util.HashMap; +import java.util.Locale; import java.util.Map; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.greaterThan; @OpenSearchIntegTestCase.ClusterScope(scope 
= OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 0) -public class RemoteStoreRestoreIT extends RemoteStoreBaseIntegTestCase { - private static final String INDEX_NAME = "remote-store-test-idx-1"; - private static final String INDEX_NAMES = "test-remote-store-1,test-remote-store-2,remote-store-test-index-1,remote-store-test-index-2"; - private static final String INDEX_NAMES_WILDCARD = "test-remote-store-*,remote-store-test-index-*"; - private static final String TOTAL_OPERATIONS = "total-operations"; - private static final String REFRESHED_OR_FLUSHED_OPERATIONS = "refreshed-or-flushed-operations"; - private static final String MAX_SEQ_NO_TOTAL = "max-seq-no-total"; - - @Override - public Settings indexSettings() { - return remoteStoreIndexSettings(0); - } - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); - } - - @Before - public void setup() { - setupRepo(); - } - - private void restore(String... indices) { - boolean restoreAllShards = randomBoolean(); - if (restoreAllShards) { - assertAcked(client().admin().indices().prepareClose(indices)); - } - client().admin() - .cluster() - .restoreRemoteStore( - new RestoreRemoteStoreRequest().indices(indices).restoreAllShards(restoreAllShards), - PlainActionFuture.newFuture() - ); - } - - private void verifyRestoredData(Map indexStats, String indexName) { - // This is required to get updated number from already active shards which were not restored - refresh(indexName); - ensureYellowAndNoInitializingShards(indexName); - ensureGreen(indexName); - assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(TOTAL_OPERATIONS)); - IndexResponse response = indexSingleDoc(indexName); - if (indexStats.containsKey(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id())) { - assertEquals(indexStats.get(MAX_SEQ_NO_TOTAL + "-shard-" + response.getShardId().id()) + 1, response.getSeqNo()); - } - refresh(indexName); - assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(TOTAL_OPERATIONS) + 1); - } - - private void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, String indices, int replicaCount, int shardCount) { - internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes); - internalCluster().startDataOnlyNodes(numDataOnlyNodes); - for (String index : indices.split(",")) { - createIndex(index, remoteStoreIndexSettings(replicaCount, shardCount)); - ensureYellowAndNoInitializingShards(index); - ensureGreen(index); - } - } +public class RemoteStoreRestoreIT extends BaseRemoteStoreRestoreIT { /** * Simulates all data restored using Remote Translog Store. * @throws IOException IO Exception. */ - public void testRemoteTranslogRestoreWithNoDataPostCommit() throws IOException { + public void testRemoteTranslogRestoreWithNoDataPostCommit() throws Exception { testRestoreFlow(1, true, randomIntBetween(1, 5)); } @@ -103,7 +51,7 @@ public void testRemoteTranslogRestoreWithNoDataPostCommit() throws IOException { * Simulates all data restored using Remote Translog Store. * @throws IOException IO Exception. */ - public void testRemoteTranslogRestoreWithNoDataPostRefresh() throws IOException { + public void testRemoteTranslogRestoreWithNoDataPostRefresh() throws Exception { testRestoreFlow(1, false, randomIntBetween(1, 5)); } @@ -112,7 +60,7 @@ public void testRemoteTranslogRestoreWithNoDataPostRefresh() throws IOException * and unrefreshed data restored using Remote Translog Store. * @throws IOException IO Exception. 
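* (Editor's note: "refreshed" operations are already part of uploaded segments and come back via the segment store restore; operations that were never refreshed are replayed from the remote translog.)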
*/ - public void testRemoteTranslogRestoreWithRefreshedData() throws IOException { + public void testRemoteTranslogRestoreWithRefreshedData() throws Exception { testRestoreFlow(randomIntBetween(2, 5), false, randomIntBetween(1, 5)); } @@ -121,7 +69,7 @@ public void testRemoteTranslogRestoreWithRefreshedData() throws IOException { * and unrefreshed data restored using Remote Translog Store. * @throws IOException IO Exception. */ - public void testRemoteTranslogRestoreWithCommittedData() throws IOException { + public void testRemoteTranslogRestoreWithCommittedData() throws Exception { testRestoreFlow(randomIntBetween(2, 5), true, randomIntBetween(1, 5)); } @@ -129,8 +77,7 @@ public void testRemoteTranslogRestoreWithCommittedData() throws IOException { * Simulates all data restored using Remote Translog Store. * @throws IOException IO Exception. */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8479") - public void testRTSRestoreWithNoDataPostCommitPrimaryReplicaDown() throws IOException { + public void testRTSRestoreWithNoDataPostCommitPrimaryReplicaDown() throws Exception { testRestoreFlowBothPrimaryReplicasDown(1, true, randomIntBetween(1, 5)); } @@ -138,8 +85,7 @@ public void testRTSRestoreWithNoDataPostCommitPrimaryReplicaDown() throws IOExce * Simulates all data restored using Remote Translog Store. * @throws IOException IO Exception. */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8479") - public void testRTSRestoreWithNoDataPostRefreshPrimaryReplicaDown() throws IOException { + public void testRTSRestoreWithNoDataPostRefreshPrimaryReplicaDown() throws Exception { testRestoreFlowBothPrimaryReplicasDown(1, false, randomIntBetween(1, 5)); } @@ -148,8 +94,7 @@ public void testRTSRestoreWithNoDataPostRefreshPrimaryReplicaDown() throws IOExc * and unrefreshed data restored using Remote Translog Store. * @throws IOException IO Exception. */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8479") - public void testRTSRestoreWithRefreshedDataPrimaryReplicaDown() throws IOException { + public void testRTSRestoreWithRefreshedDataPrimaryReplicaDown() throws Exception { testRestoreFlowBothPrimaryReplicasDown(randomIntBetween(2, 5), false, randomIntBetween(1, 5)); } @@ -158,12 +103,11 @@ public void testRTSRestoreWithRefreshedDataPrimaryReplicaDown() throws IOExcepti * and unrefreshed data restored using Remote Translog Store. * @throws IOException IO Exception. */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8479") - public void testRTSRestoreWithCommittedDataPrimaryReplicaDown() throws IOException { + public void testRTSRestoreWithCommittedDataPrimaryReplicaDown() throws Exception { testRestoreFlowBothPrimaryReplicasDown(randomIntBetween(2, 5), true, randomIntBetween(1, 5)); } - private void restoreAndVerify(int shardCount, int replicaCount, Map indexStats) { + private void restoreAndVerify(int shardCount, int replicaCount, Map indexStats) throws Exception { restore(INDEX_NAME); ensureGreen(INDEX_NAME); // This is required to get updated number from already active shards which were not restored @@ -178,8 +122,8 @@ private void restoreAndVerify(int shardCount, int replicaCount, Map indexStats = indexData(numberOfIterations, invokeFlush, INDEX_NAME); assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); @@ -197,10 +141,10 @@ private void testRestoreFlow(int numberOfIterations, boolean invokeFlush, int sh * @param invokeFlush If true, a flush is invoked. 
Otherwise, a refresh is invoked. * @throws IOException IO Exception. */ - private void testRestoreFlowBothPrimaryReplicasDown(int numberOfIterations, boolean invokeFlush, int shardCount) throws IOException { + private void testRestoreFlowBothPrimaryReplicasDown(int numberOfIterations, boolean invokeFlush, int shardCount) throws Exception { prepareCluster(1, 2, INDEX_NAME, 1, shardCount); Map indexStats = indexData(numberOfIterations, invokeFlush, INDEX_NAME); - assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); + assertEquals(shardCount * 2, getNumShards(INDEX_NAME).totalNumShards); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNodeName(INDEX_NAME))); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); @@ -216,14 +160,14 @@ private void testRestoreFlowBothPrimaryReplicasDown(int numberOfIterations, bool * @param invokeFlush If true, a flush is invoked. Otherwise, a refresh is invoked. * @throws IOException IO Exception. */ - private void testRestoreFlowMultipleIndices(int numberOfIterations, boolean invokeFlush, int shardCount) throws IOException { + private void testRestoreFlowMultipleIndices(int numberOfIterations, boolean invokeFlush, int shardCount) throws Exception { prepareCluster(1, 3, INDEX_NAMES, 1, shardCount); String[] indices = INDEX_NAMES.split(","); Map> indicesStats = new HashMap<>(); for (String index : indices) { Map indexStats = indexData(numberOfIterations, invokeFlush, index); indicesStats.put(index, indexStats); - assertEquals(shardCount, getNumShards(index).totalNumShards); + assertEquals(shardCount * 2, getNumShards(index).totalNumShards); } for (String index : indices) { @@ -254,14 +198,14 @@ private void testRestoreFlowMultipleIndices(int numberOfIterations, boolean invo ); ensureGreen(indices); for (String index : indices) { - assertEquals(shardCount, getNumShards(index).totalNumShards); + assertEquals(shardCount * 2, getNumShards(index).totalNumShards); verifyRestoredData(indicesStats.get(index), index); } } public void testRestoreFlowAllShardsNoRedIndex() throws InterruptedException { int shardCount = randomIntBetween(1, 5); - prepareCluster(0, 3, INDEX_NAME, 0, shardCount); + prepareCluster(1, 3, INDEX_NAME, 0, shardCount); indexData(randomIntBetween(2, 5), true, INDEX_NAME); assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); @@ -275,9 +219,9 @@ public void testRestoreFlowAllShardsNoRedIndex() throws InterruptedException { } } - public void testRestoreFlowNoRedIndex() { + public void testRestoreFlowNoRedIndex() throws Exception { int shardCount = randomIntBetween(1, 5); - prepareCluster(0, 3, INDEX_NAME, 0, shardCount); + prepareCluster(1, 3, INDEX_NAME, 0, shardCount); Map indexStats = indexData(randomIntBetween(2, 5), true, INDEX_NAME); assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); @@ -297,7 +241,7 @@ public void testRestoreFlowNoRedIndex() { * @throws IOException IO Exception. */ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8480") - public void testRTSRestoreWithCommittedDataMultipleIndicesPatterns() throws IOException { + public void testRTSRestoreWithCommittedDataMultipleIndicesPatterns() throws Exception { testRestoreFlowMultipleIndices(2, true, randomIntBetween(1, 5)); } @@ -308,7 +252,7 @@ public void testRTSRestoreWithCommittedDataMultipleIndicesPatterns() throws IOEx * @throws IOException IO Exception. 
*/ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8480") - public void testRTSRestoreWithCommittedDataDefaultAllIndices() throws IOException { + public void testRTSRestoreWithCommittedDataDefaultAllIndices() throws Exception { int shardCount = randomIntBetween(1, 5); prepareCluster(1, 3, INDEX_NAMES, 1, shardCount); String[] indices = INDEX_NAMES.split(","); @@ -349,7 +293,7 @@ public void testRTSRestoreWithCommittedDataDefaultAllIndices() throws IOExceptio * with only some of the remote-enabled red indices requested for the restore. * @throws IOException IO Exception. */ - public void testRTSRestoreWithCommittedDataNotAllRedRemoteIndices() throws IOException { + public void testRTSRestoreWithCommittedDataNotAllRedRemoteIndices() throws Exception { int shardCount = randomIntBetween(1, 5); prepareCluster(1, 3, INDEX_NAMES, 0, shardCount); String[] indices = INDEX_NAMES.split(","); @@ -397,7 +341,7 @@ public void testRTSRestoreWithCommittedDataNotAllRedRemoteIndices() throws IOExc * @throws IOException IO Exception. */ @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8480") - public void testRTSRestoreWithCommittedDataExcludeIndicesPatterns() throws IOException { + public void testRTSRestoreWithCommittedDataExcludeIndicesPatterns() throws Exception { int shardCount = randomIntBetween(1, 5); prepareCluster(1, 3, INDEX_NAMES, 1, shardCount); String[] indices = INDEX_NAMES.split(","); @@ -446,9 +390,76 @@ public void testRTSRestoreWithCommittedDataExcludeIndicesPatterns() throws IOExc * when the index has no data. * @throws IOException IO Exception. */ - public void testRTSRestoreDataOnlyInTranslog() throws IOException { + public void testRTSRestoreDataOnlyInTranslog() throws Exception { testRestoreFlow(0, true, randomIntBetween(1, 5)); } + public void testRateLimitedRemoteDownloads() throws Exception { + clusterSettingsSuppliedByTest = true; + int shardCount = randomIntBetween(1, 3); + Path segmentRepoPath = randomRepoPath(); + Path tlogRepoPath = randomRepoPath(); + prepareCluster( + 1, + 3, + INDEX_NAME, + 0, + shardCount, + buildRemoteStoreNodeAttributes(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, tlogRepoPath, true) + ); + + // validate inplace repository metadata update + ClusterService clusterService = internalCluster().getInstance(ClusterService.class); + DiscoveryNode node = clusterService.localNode(); + String settingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + REPOSITORY_NAME + ); + Map settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> node.getAttributes().get(key))); + Settings.Builder settings = Settings.builder(); + settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue())); + settings.put("location", segmentRepoPath).put("max_remote_download_bytes_per_sec", 4, ByteSizeUnit.KB); + + assertAcked(client().admin().cluster().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(settings).get()); + + for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + Repository segmentRepo = repositoriesService.repository(REPOSITORY_NAME); + assertEquals("4096b", segmentRepo.getMetadata().settings().get("max_remote_download_bytes_per_sec")); + } + + Map indexStats = indexData(5, false, INDEX_NAME); + 
assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); + ensureRed(INDEX_NAME); + restore(INDEX_NAME); + assertBusy(() -> { + long downloadPauseTime = 0L; + for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + downloadPauseTime += repositoriesService.repository(REPOSITORY_NAME).getRemoteDownloadThrottleTimeInNanos(); + } + assertThat(downloadPauseTime, greaterThan(TimeValue.timeValueSeconds(randomIntBetween(3, 5)).nanos())); + }, 30, TimeUnit.SECONDS); + // Waiting for extended period for green state so that rate limit does not cause flakiness + ensureGreen(TimeValue.timeValueSeconds(120), INDEX_NAME); + // This is required to get updated number from already active shards which were not restored + assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); + assertEquals(0, getNumShards(INDEX_NAME).numReplicas); + verifyRestoredData(indexStats, INDEX_NAME); + + // revert repo metadata to pass asserts on repo metadata vs. node attrs during teardown + // https://github.com/opensearch-project/OpenSearch/pull/9569#discussion_r1345668700 + settings.remove("max_remote_download_bytes_per_sec"); + assertAcked(client().admin().cluster().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(settings).get()); + for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + Repository segmentRepo = repositoriesService.repository(REPOSITORY_NAME); + assertNull(segmentRepo.getMetadata().settings().get("max_remote_download_bytes_per_sec")); + } + } + // TODO: Restore flow - index aliases } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsFromNodesStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsFromNodesStatsIT.java new file mode 100644 index 0000000000000..6e796bdae5a4a --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsFromNodesStatsIT.java @@ -0,0 +1,209 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.remote.RemoteSegmentStats; +import org.opensearch.index.translog.RemoteTranslogStats; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; + +import java.util.concurrent.TimeUnit; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStoreStatsFromNodesStatsIT extends RemoteStoreBaseIntegTestCase { + private static final String INDEX_NAME = "remote-index-1"; + private static final int DATA_NODE_COUNT = 2; + private static final int CLUSTER_MANAGER_NODE_COUNT = 3; + + @Before + public void setup() { + setupCustomCluster(); + } + + private void setupCustomCluster() { + internalCluster().startClusterManagerOnlyNodes(CLUSTER_MANAGER_NODE_COUNT); + internalCluster().startDataOnlyNodes(DATA_NODE_COUNT); + ensureStableCluster(DATA_NODE_COUNT + CLUSTER_MANAGER_NODE_COUNT); + } + + /** + * - Creates two indices with single primary shard, pinned to a single node. + * - Indexes documents in both of them and forces a refresh for both + * - Polls the _remotestore/stats API for individual index level stats + * - Adds up requisite fields from the API output, repeats this for the 2nd index + * - Polls _nodes/stats and verifies that the total values at node level add up + * to the values captured in the previous step + */ + public void testNodesStatsParityWithOnlyPrimaryShards() { + String[] dataNodes = internalCluster().getDataNodeNames().toArray(String[]::new); + String randomDataNode = dataNodes[randomIntBetween(0, dataNodes.length - 1)]; + String firstIndex = INDEX_NAME + "1"; + String secondIndex = INDEX_NAME + "2"; + + // Create first index + createIndex( + firstIndex, + Settings.builder().put(remoteStoreIndexSettings(0, 1)).put("index.routing.allocation.require._name", randomDataNode).build() + ); + ensureGreen(firstIndex); + indexSingleDoc(firstIndex, true); + + // Create second index + createIndex( + secondIndex, + Settings.builder().put(remoteStoreIndexSettings(0, 1)).put("index.routing.allocation.require._name", randomDataNode).build() + ); + ensureGreen(secondIndex); + indexSingleDoc(secondIndex, true); + + assertNodeStatsParityOnNode(randomDataNode, firstIndex, secondIndex); + } + + /** + * - Creates two indices with single primary shard and single replica + * - Indexes documents in both of them and forces a refresh for both + * - Polls the _remotestore/stats API for individual index level stats + * - Adds up requisite fields from the API output for both indices + * - Polls _nodes/stats and verifies that the total values at node level add up + * to the values captured in the previous step + * - Repeats the above 3 steps for the second node + */ + public void testNodesStatsParityWithReplicaShards() throws Exception { + String firstIndex = INDEX_NAME + "1"; + String secondIndex = INDEX_NAME + "2"; + + createIndex(firstIndex, Settings.builder().put(remoteStoreIndexSettings(1, 1)).build()); + ensureGreen(firstIndex); + indexSingleDoc(firstIndex, true); + + // Create second index + createIndex(secondIndex, Settings.builder().put(remoteStoreIndexSettings(1, 1)).build()); + ensureGreen(secondIndex); + indexSingleDoc(secondIndex, true); + + assertBusy(() -> 
assertNodeStatsParityAcrossNodes(firstIndex, secondIndex), 15, TimeUnit.SECONDS); + } + + /** + * Ensures that node stats show 0 values for dedicated cluster manager nodes + * since cluster manager nodes do not participate in indexing + */ + public void testZeroRemoteStatsOnNodesStatsForClusterManager() { + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(INDEX_NAME); + indexSingleDoc(INDEX_NAME); + refresh(INDEX_NAME); + + NodesStatsResponse nodesStatsResponseForClusterManager = client().admin() + .cluster() + .prepareNodesStats(internalCluster().getClusterManagerName()) + .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true).set(CommonStatsFlags.Flag.Translog, true)) + .get(); + + assertTrue( + nodesStatsResponseForClusterManager.getNodes().get(0).getNode().isClusterManagerNode() + && !nodesStatsResponseForClusterManager.getNodes().get(0).getNode().isDataNode() + ); + assertZeroRemoteSegmentStats( + nodesStatsResponseForClusterManager.getNodes().get(0).getIndices().getSegments().getRemoteSegmentStats() + ); + assertZeroRemoteTranslogStats( + nodesStatsResponseForClusterManager.getNodes().get(0).getIndices().getTranslog().getRemoteTranslogStats() + ); + + NodesStatsResponse nodesStatsResponseForDataNode = client().admin() + .cluster() + .prepareNodesStats(primaryNodeName(INDEX_NAME)) + .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true).set(CommonStatsFlags.Flag.Translog, true)) + .get(); + + assertTrue(nodesStatsResponseForDataNode.getNodes().get(0).getNode().isDataNode()); + RemoteSegmentStats remoteSegmentStats = nodesStatsResponseForDataNode.getNodes() + .get(0) + .getIndices() + .getSegments() + .getRemoteSegmentStats(); + assertTrue(remoteSegmentStats.getUploadBytesStarted() > 0); + assertTrue(remoteSegmentStats.getUploadBytesSucceeded() > 0); + + RemoteTranslogStats remoteTranslogStats = nodesStatsResponseForDataNode.getNodes() + .get(0) + .getIndices() + .getTranslog() + .getRemoteTranslogStats(); + assertTrue(remoteTranslogStats.getUploadBytesStarted() > 0); + assertTrue(remoteTranslogStats.getUploadBytesSucceeded() > 0); + } + + private void assertZeroRemoteSegmentStats(RemoteSegmentStats remoteSegmentStats) { + // Compare with a fresh object because all values default to 0 in a fresh object + assertEquals(new RemoteSegmentStats(), remoteSegmentStats); + } + + private void assertZeroRemoteTranslogStats(RemoteTranslogStats remoteTranslogStats) { + // Compare with a fresh object because all values default to 0 in a fresh object + assertEquals(new RemoteTranslogStats(), remoteTranslogStats); + } + + private static void assertNodeStatsParityAcrossNodes(String... indices) { + for (String dataNode : internalCluster().getDataNodeNames()) { + assertNodeStatsParityOnNode(dataNode, indices); + } + } + + private static void assertNodeStatsParityOnNode(String dataNode, String...
indices) { + RemoteSegmentStats remoteSegmentStatsCumulative = new RemoteSegmentStats(); + RemoteTranslogStats remoteTranslogStatsCumulative = new RemoteTranslogStats(); + for (String index : indices) { + // Fetch _remotestore/stats + RemoteStoreStatsResponse remoteStoreStats = client(dataNode).admin() + .cluster() + .prepareRemoteStoreStats(index, "0") + .setLocal(true) + .get(); + remoteSegmentStatsCumulative.add(new RemoteSegmentStats(remoteStoreStats.getRemoteStoreStats()[0].getSegmentStats())); + remoteTranslogStatsCumulative.add(new RemoteTranslogStats(remoteStoreStats.getRemoteStoreStats()[0].getTranslogStats())); + } + + // Fetch _nodes/stats + NodesStatsResponse nodesStatsResponse = client().admin() + .cluster() + .prepareNodesStats(dataNode) + .setIndices(new CommonStatsFlags().set(CommonStatsFlags.Flag.Segments, true).set(CommonStatsFlags.Flag.Translog, true)) + .get(); + + // assert segment stats + RemoteSegmentStats remoteSegmentStatsFromNodesStats = nodesStatsResponse.getNodes() + .get(0) + .getIndices() + .getSegments() + .getRemoteSegmentStats(); + assertEquals(remoteSegmentStatsCumulative, remoteSegmentStatsFromNodesStats); + // Ensure that total upload time has a non-zero value if there have been segments uploaded from the node + if (remoteSegmentStatsCumulative.getUploadBytesStarted() > 0) { + assertTrue(remoteSegmentStatsCumulative.getTotalUploadTime() > 0); + } + // Ensure that total download time has a non-zero value if there have been segments downloaded to the node + if (remoteSegmentStatsCumulative.getDownloadBytesStarted() > 0) { + assertTrue(remoteSegmentStatsCumulative.getTotalDownloadTime() > 0); + } + + // assert translog stats + RemoteTranslogStats remoteTranslogStatsFromNodesStats = nodesStatsResponse.getNodes() + .get(0) + .getIndices() + .getTranslog() + .getRemoteTranslogStats(); + assertEquals(remoteTranslogStatsCumulative, remoteTranslogStatsFromNodesStats); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 0671cd5b2da28..cb5e2e911705b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -15,6 +15,8 @@ import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.FollowersChecker; +import org.opensearch.cluster.coordination.LeaderChecker; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; @@ -22,15 +24,21 @@ import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.Before; +import org.opensearch.test.disruption.NetworkDisruption; +import org.opensearch.test.transport.MockTransportService; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; import java.util.List; import java.util.Locale; +import 
java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -39,17 +47,22 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 3) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreStatsIT extends RemoteStoreBaseIntegTestCase { private static final String INDEX_NAME = "remote-store-test-idx-1"; - @Before + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + public void setup() { - setupRepo(); + internalCluster().startNodes(3); } public void testStatsResponseFromAllNodes() { + setup(); // Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes // during this time frame. This ensures that the segment upload has started. @@ -75,9 +88,14 @@ public void testStatsResponseFromAllNodes() { .filter(stat -> indexShardId.equals(stat.getSegmentStats().shardId.toString())) .collect(Collectors.toList()); assertEquals(1, matches.size()); - RemoteSegmentTransferTracker.Stats stats = matches.get(0).getSegmentStats(); - validateSegmentUploadStats(stats); - assertEquals(0, stats.directoryFileTransferTrackerStats.transferredBytesStarted); + + RemoteSegmentTransferTracker.Stats segmentStats = matches.get(0).getSegmentStats(); + validateSegmentUploadStats(segmentStats); + assertEquals(0, segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted); + + RemoteTranslogTransferTracker.Stats translogStats = matches.get(0).getTranslogStats(); + assertNonZeroTranslogUploadStatsNoFailures(translogStats); + assertZeroTranslogDownloadStats(translogStats); } // Step 3 - Enable replicas on the existing indices and ensure that download @@ -95,19 +113,27 @@ public void testStatsResponseFromAllNodes() { for (RemoteStoreStats stat : matches) { ShardRouting routing = stat.getShardRouting(); validateShardRouting(routing); - RemoteSegmentTransferTracker.Stats stats = stat.getSegmentStats(); + RemoteSegmentTransferTracker.Stats segmentStats = stat.getSegmentStats(); + RemoteTranslogTransferTracker.Stats translogStats = stat.getTranslogStats(); if (routing.primary()) { - validateSegmentUploadStats(stats); - assertEquals(0, stats.directoryFileTransferTrackerStats.transferredBytesStarted); + validateSegmentUploadStats(segmentStats); + assertEquals(0, segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted); + + assertNonZeroTranslogUploadStatsNoFailures(translogStats); + assertZeroTranslogDownloadStats(translogStats); } else { - validateSegmentDownloadStats(stats); - assertEquals(0, stats.totalUploadsStarted); + validateSegmentDownloadStats(segmentStats); + assertEquals(0, segmentStats.totalUploadsStarted); + + assertZeroTranslogUploadStats(translogStats); + assertZeroTranslogDownloadStats(translogStats); } } } } public void testStatsResponseAllShards() { + setup(); // Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes // during this time frame. This ensures that the segment upload has started. 
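The hunks above and below repeat one call pattern: fetch per-shard stats via the _remotestore/stats action, then assert on the segment and translog trackers separately. What follows is a minimal sketch of that pattern, assuming only identifiers visible in this diff (client(), prepareRemoteStoreStats, getSegmentStats, getTranslogStats); the trailing assertions are illustrative, not the suite's exact helpers.

// Sketch: fetch remote store stats for shard 0 of INDEX_NAME and split the
// response into its segment and translog trackers, mirroring the stat types
// (RemoteSegmentTransferTracker.Stats, RemoteTranslogTransferTracker.Stats)
// used elsewhere in this diff.
RemoteStoreStatsResponse response = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get();
RemoteSegmentTransferTracker.Stats segmentStats = response.getRemoteStoreStats()[0].getSegmentStats();
RemoteTranslogTransferTracker.Stats translogStats = response.getRemoteStoreStats()[0].getTranslogStats();
// A healthy primary should report successful uploads and no failures.
assertTrue(segmentStats.totalUploadsSucceeded > 0);
assertEquals(0, translogStats.totalUploadsFailed);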
@@ -130,10 +156,15 @@ public void testStatsResponseAllShards() { RemoteStoreStatsResponse response = remoteStoreStatsRequestBuilder.get(); assertEquals(3, response.getSuccessfulShards()); assertTrue(response.getRemoteStoreStats() != null && response.getRemoteStoreStats().length == 3); + RemoteSegmentTransferTracker.Stats segmentStats = response.getRemoteStoreStats()[0].getSegmentStats(); validateSegmentUploadStats(segmentStats); assertEquals(0, segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted); + RemoteTranslogTransferTracker.Stats translogStats = response.getRemoteStoreStats()[0].getTranslogStats(); + assertNonZeroTranslogUploadStatsNoFailures(translogStats); + assertZeroTranslogDownloadStats(translogStats); + // Step 3 - Enable replicas on the existing indices and ensure that download // stats are being populated as well changeReplicaCountAndEnsureGreen(1); @@ -144,18 +175,26 @@ public void testStatsResponseAllShards() { ShardRouting routing = stat.getShardRouting(); validateShardRouting(routing); segmentStats = stat.getSegmentStats(); + translogStats = stat.getTranslogStats(); if (routing.primary()) { validateSegmentUploadStats(segmentStats); assertEquals(0, segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted); + + assertNonZeroTranslogUploadStatsNoFailures(translogStats); + assertZeroTranslogDownloadStats(translogStats); } else { validateSegmentDownloadStats(segmentStats); assertEquals(0, segmentStats.totalUploadsStarted); + + assertZeroTranslogUploadStats(translogStats); + assertZeroTranslogDownloadStats(translogStats); } } } public void testStatsResponseFromLocalNode() { + setup(); // Step 1 - We create cluster, create an index, and then index documents into. We also do multiple refreshes/flushes // during this time frame. This ensures that the segment upload has started. 
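The next hunk exercises the node-local variant of the same API, where a specific node is queried and the response is limited to shards it hosts. Below is a hedged sketch of that call shape, again assuming only names that appear in this diff (client(node), setLocal(true), getShardRouting()); the node pick is hypothetical.

// Sketch: node-local remote store stats. setLocal(true) restricts the
// response to shards resident on the node that receives the request.
String node = internalCluster().getDataNodeNames().iterator().next(); // hypothetical pick of a data node
RemoteStoreStatsResponse localResponse = client(node).admin()
    .cluster()
    .prepareRemoteStoreStats(INDEX_NAME, "0")
    .setLocal(true)
    .get();
for (RemoteStoreStats stat : localResponse.getRemoteStoreStats()) {
    // Primaries carry upload stats; replicas carry download stats.
    assertNotNull(stat.getShardRouting());
}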
@@ -182,6 +221,10 @@ public void testStatsResponseFromLocalNode() { RemoteSegmentTransferTracker.Stats segmentStats = response.getRemoteStoreStats()[0].getSegmentStats(); validateSegmentUploadStats(segmentStats); assertEquals(0, segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted); + + RemoteTranslogTransferTracker.Stats translogStats = response.getRemoteStoreStats()[0].getTranslogStats(); + assertNonZeroTranslogUploadStatsNoFailures(translogStats); + assertZeroTranslogDownloadStats(translogStats); } changeReplicaCountAndEnsureGreen(1); for (String node : nodes) { @@ -195,19 +238,27 @@ public void testStatsResponseFromLocalNode() { for (RemoteStoreStats stat : response.getRemoteStoreStats()) { ShardRouting routing = stat.getShardRouting(); validateShardRouting(routing); - RemoteSegmentTransferTracker.Stats stats = stat.getSegmentStats(); + RemoteSegmentTransferTracker.Stats segmentStats = stat.getSegmentStats(); + RemoteTranslogTransferTracker.Stats translogStats = stat.getTranslogStats(); if (routing.primary()) { - validateSegmentUploadStats(stats); - assertEquals(0, stats.directoryFileTransferTrackerStats.transferredBytesStarted); + validateSegmentUploadStats(segmentStats); + assertEquals(0, segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted); + + assertNonZeroTranslogUploadStatsNoFailures(translogStats); + assertZeroTranslogDownloadStats(translogStats); } else { - validateSegmentDownloadStats(stats); - assertEquals(0, stats.totalUploadsStarted); + validateSegmentDownloadStats(segmentStats); + assertEquals(0, segmentStats.totalUploadsStarted); + + assertZeroTranslogUploadStats(translogStats); + assertZeroTranslogDownloadStats(translogStats); } } } } public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exception { + setup(); // Scenario: // - Create index with single primary and single replica shard // - Disable Refresh Interval for the index @@ -297,6 +348,7 @@ public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exce } public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() throws Exception { + setup(); // Scenario: // - Create index with single primary and N-1 replica shards (N = no of data nodes) // - Disable Refresh Interval for the index @@ -388,6 +440,7 @@ public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() thr } public void testStatsOnShardRelocation() { + setup(); // Scenario: // - Create index with single primary and single replica shard // - Index documents @@ -443,6 +496,7 @@ public void testStatsOnShardRelocation() { } public void testStatsOnShardUnassigned() throws IOException { + setup(); // Scenario: // - Create index with single primary and two replica shard // - Index documents @@ -469,6 +523,7 @@ public void testStatsOnShardUnassigned() throws IOException { } public void testStatsOnRemoteStoreRestore() throws IOException { + setup(); // Creating an index with primary shard count == total nodes in cluster and 0 replicas int dataNodeCount = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes(); createIndex(INDEX_NAME, remoteStoreIndexSettings(0, dataNodeCount)); @@ -499,19 +554,24 @@ public void testStatsOnRemoteStoreRestore() throws IOException { RemoteStoreStatsResponse remoteStoreStatsResponse = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); Arrays.stream(remoteStoreStatsResponse.getRemoteStoreStats()).forEach(statObject -> { - RemoteSegmentTransferTracker.Stats segmentTracker = 
statObject.getSegmentStats(); + RemoteSegmentTransferTracker.Stats segmentStats = statObject.getSegmentStats(); // Assert that we have both upload and download stats for the index assertTrue( - segmentTracker.totalUploadsStarted > 0 && segmentTracker.totalUploadsSucceeded > 0 && segmentTracker.totalUploadsFailed == 0 + segmentStats.totalUploadsStarted > 0 && segmentStats.totalUploadsSucceeded > 0 && segmentStats.totalUploadsFailed == 0 ); assertTrue( - segmentTracker.directoryFileTransferTrackerStats.transferredBytesStarted > 0 - && segmentTracker.directoryFileTransferTrackerStats.transferredBytesSucceeded > 0 + segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted > 0 + && segmentStats.directoryFileTransferTrackerStats.transferredBytesSucceeded > 0 ); + + RemoteTranslogTransferTracker.Stats translogStats = statObject.getTranslogStats(); + assertNonZeroTranslogUploadStatsNoFailures(translogStats); + assertNonZeroTranslogDownloadStats(translogStats); }); } public void testNonZeroPrimaryStatsOnNewlyCreatedIndexWithZeroDocs() throws Exception { + setup(); // Create an index with one primary and one replica shard createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 1)); ensureGreen(INDEX_NAME); @@ -528,23 +588,104 @@ public void testNonZeroPrimaryStatsOnNewlyCreatedIndexWithZeroDocs() throws Exce .get() .getRemoteStoreStats(); Arrays.stream(remoteStoreStats).forEach(statObject -> { - RemoteSegmentTransferTracker.Stats segmentTracker = statObject.getSegmentStats(); + RemoteSegmentTransferTracker.Stats segmentStats = statObject.getSegmentStats(); + RemoteTranslogTransferTracker.Stats translogStats = statObject.getTranslogStats(); if (statObject.getShardRouting().primary()) { assertTrue( - segmentTracker.totalUploadsSucceeded == 1 - && segmentTracker.totalUploadsStarted == segmentTracker.totalUploadsSucceeded - && segmentTracker.totalUploadsFailed == 0 + segmentStats.totalUploadsSucceeded == 1 + && segmentStats.totalUploadsStarted == segmentStats.totalUploadsSucceeded + && segmentStats.totalUploadsFailed == 0 ); + // On primary shard creation, we upload to remote translog post primary mode activation. + // This changes upload stats to non-zero for primary shard. 
+ assertNonZeroTranslogUploadStatsNoFailures(translogStats); } else { assertTrue( - segmentTracker.directoryFileTransferTrackerStats.transferredBytesStarted == 0 - && segmentTracker.directoryFileTransferTrackerStats.transferredBytesSucceeded == 0 + segmentStats.directoryFileTransferTrackerStats.transferredBytesStarted == 0 + && segmentStats.directoryFileTransferTrackerStats.transferredBytesSucceeded == 0 ); + assertZeroTranslogUploadStats(translogStats); } + assertZeroTranslogDownloadStats(translogStats); }); }, 5, TimeUnit.SECONDS); } + public void testStatsCorrectnessOnFailover() { + Settings clusterSettings = Settings.builder() + .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "100ms") + .put(LeaderChecker.LEADER_CHECK_INTERVAL_SETTING.getKey(), "500ms") + .put(LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) + .put(FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "100ms") + .put(FollowersChecker.FOLLOWER_CHECK_INTERVAL_SETTING.getKey(), "500ms") + .put(FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) + .put(nodeSettings(0)) + .build(); + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(clusterSettings); + internalCluster().startDataOnlyNodes(2, clusterSettings); + + // Create an index with one primary and one replica shard + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 1)); + ensureGreen(INDEX_NAME); + + // Index some docs and refresh + indexDocs(); + refresh(INDEX_NAME); + + String primaryNode = primaryNodeName(INDEX_NAME); + String replicaNode = replicaNodeName(INDEX_NAME); + + // Start network disruption - primary node will be isolated + Set nodesInOneSide = Stream.of(clusterManagerNode, replicaNode).collect(Collectors.toCollection(HashSet::new)); + Set nodesInOtherSide = Stream.of(primaryNode).collect(Collectors.toCollection(HashSet::new)); + NetworkDisruption networkDisruption = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInOtherSide), + NetworkDisruption.DISCONNECT + ); + internalCluster().setDisruptionScheme(networkDisruption); + logger.info("--> network disruption is started"); + networkDisruption.startDisrupting(); + ensureStableCluster(2, clusterManagerNode); + + RemoteStoreStatsResponse response = client(clusterManagerNode).admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); + final String indexShardId = String.format(Locale.ROOT, "[%s][%s]", INDEX_NAME, "0"); + List matches = Arrays.stream(response.getRemoteStoreStats()) + .filter(stat -> indexShardId.equals(stat.getSegmentStats().shardId.toString())) + .collect(Collectors.toList()); + assertEquals(1, matches.size()); + RemoteSegmentTransferTracker.Stats segmentStats = matches.get(0).getSegmentStats(); + assertEquals(0, segmentStats.refreshTimeLagMs); + + networkDisruption.stopDisrupting(); + internalCluster().clearDisruptionScheme(); + ensureStableCluster(3, clusterManagerNode); + ensureGreen(INDEX_NAME); + logger.info("Test completed"); + } + + public void testZeroLagOnCreateIndex() throws InterruptedException { + setup(); + String clusterManagerNode = internalCluster().getClusterManagerName(); + + int numOfShards = randomIntBetween(1, 3); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, numOfShards)); + ensureGreen(INDEX_NAME); + long currentTimeNs = System.nanoTime(); + while (currentTimeNs == System.nanoTime()) { + Thread.sleep(10); + } + + for (int i = 0; i < numOfShards; i++) { + RemoteStoreStatsResponse response = client(clusterManagerNode).admin() + .cluster() + 
.prepareRemoteStoreStats(INDEX_NAME, String.valueOf(i)) + .get(); + for (RemoteStoreStats remoteStoreStats : response.getRemoteStoreStats()) { + assertEquals(0, remoteStoreStats.getSegmentStats().refreshTimeLagMs); + } + } + } + private void indexDocs() { for (int i = 0; i < randomIntBetween(5, 10); i++) { if (randomBoolean()) { @@ -553,9 +694,7 @@ private void indexDocs() { refresh(INDEX_NAME); } int numberOfOperations = randomIntBetween(10, 30); - for (int j = 0; j < numberOfOperations; j++) { - indexSingleDoc(INDEX_NAME); - } + indexBulk(INDEX_NAME, numberOfOperations); } } @@ -602,6 +741,43 @@ private void validateSegmentDownloadStats(RemoteSegmentTransferTracker.Stats sta assertTrue(stats.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage > 0); } + private void assertNonZeroTranslogUploadStatsNoFailures(RemoteTranslogTransferTracker.Stats stats) { + assertTrue(stats.uploadBytesStarted > 0); + assertTrue(stats.totalUploadsStarted > 0); + assertEquals(0, stats.uploadBytesFailed); + assertEquals(0, stats.totalUploadsFailed); + assertTrue(stats.uploadBytesSucceeded > 0); + assertTrue(stats.totalUploadsSucceeded > 0); + assertTrue(stats.totalUploadTimeInMillis > 0); + assertTrue(stats.lastSuccessfulUploadTimestamp > 0); + } + + private void assertZeroTranslogUploadStats(RemoteTranslogTransferTracker.Stats stats) { + assertEquals(0, stats.uploadBytesStarted); + assertEquals(0, stats.totalUploadsStarted); + assertEquals(0, stats.uploadBytesFailed); + assertEquals(0, stats.totalUploadsFailed); + assertEquals(0, stats.uploadBytesSucceeded); + assertEquals(0, stats.totalUploadsSucceeded); + assertEquals(0, stats.totalUploadTimeInMillis); + assertEquals(0, stats.lastSuccessfulUploadTimestamp); + } + + private void assertNonZeroTranslogDownloadStats(RemoteTranslogTransferTracker.Stats stats) { + assertTrue(stats.downloadBytesSucceeded > 0); + assertTrue(stats.totalDownloadsSucceeded > 0); + // TODO: Need to simulate a delay for this assertion to avoid flakiness + // assertTrue(stats.totalDownloadTimeInMillis > 0); + assertTrue(stats.lastSuccessfulDownloadTimestamp > 0); + } + + private void assertZeroTranslogDownloadStats(RemoteTranslogTransferTracker.Stats stats) { + assertEquals(0, stats.downloadBytesSucceeded); + assertEquals(0, stats.totalDownloadsSucceeded); + assertEquals(0, stats.totalDownloadTimeInMillis); + assertEquals(0, stats.lastSuccessfulDownloadTimestamp); + } + // Validate if the shardRouting obtained from cluster state contains the exact same routing object // parameters as obtained from the remote store stats API private void validateShardRouting(ShardRouting routing) { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java index abad56d892d88..4e3f01b8f257f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java @@ -34,13 +34,13 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -@OpenSearchIntegTestCase.ClusterScope(numDataNodes = 0) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class ReplicaToPrimaryPromotionIT extends RemoteStoreBaseIntegTestCase { private int shard_count = 5; @Before public void setup() { - setupRepo(); + 
internalCluster().startClusterManagerOnlyNode(); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java new file mode 100644 index 0000000000000..b7b3f1d14f422 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.index.Index; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.SegmentReplicationState; +import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.indices.replication.common.ReplicationCollection; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.file.Path; +import java.util.Optional; +import java.util.Set; + +/** + * This class runs tests with remote store + segRep while blocking file downloads + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SegmentReplicationUsingRemoteStoreDisruptionIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { + + @Override + public Settings indexSettings() { + return remoteStoreIndexSettings(1); + } + + @Override + protected boolean addMockInternalEngine() { + return false; + } + + public void testCancelReplicationWhileSyncingSegments() throws Exception { + Path location = randomRepoPath().toAbsolutePath(); + setup(location, 0d, "metadata", Long.MAX_VALUE, 1); + + final Set dataNodeNames = internalCluster().getDataNodeNames(); + final String replicaNode = getNode(dataNodeNames, false); + final String primaryNode = getNode(dataNodeNames, true); + + SegmentReplicationTargetService targetService = internalCluster().getInstance(SegmentReplicationTargetService.class, replicaNode); + ensureGreen(INDEX_NAME); + blockNodeOnAnySegmentFile(REPOSITORY_NAME, replicaNode); + final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); + indexSingleDoc(); + refresh(INDEX_NAME); + waitForBlock(replicaNode, REPOSITORY_NAME, TimeValue.timeValueSeconds(10)); + final SegmentReplicationState state = targetService.getOngoingEventSegmentReplicationState(indexShard.shardId()); + assertEquals(SegmentReplicationState.Stage.GET_FILES, state.getStage()); + ReplicationCollection.ReplicationRef segmentReplicationTargetReplicationRef = targetService.get( + state.getReplicationId() + ); + final SegmentReplicationTarget segmentReplicationTarget = segmentReplicationTargetReplicationRef.get(); + // close the target ref here otherwise it will hold a refcount + segmentReplicationTargetReplicationRef.close(); + assertNotNull(segmentReplicationTarget); + assertTrue(segmentReplicationTarget.refCount() > 0); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + assertBusy(() -> { + 
assertTrue(indexShard.routingEntry().primary()); + assertNull(targetService.getOngoingEventSegmentReplicationState(indexShard.shardId())); + assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); + }); + unblockNode(REPOSITORY_NAME, replicaNode); + cleanupRepo(); + } + + public void testCancelReplicationWhileFetchingMetadata() throws Exception { + Path location = randomRepoPath().toAbsolutePath(); + setup(location, 0d, "metadata", Long.MAX_VALUE, 1); + + final Set dataNodeNames = internalCluster().getDataNodeNames(); + final String replicaNode = getNode(dataNodeNames, false); + final String primaryNode = getNode(dataNodeNames, true); + + SegmentReplicationTargetService targetService = internalCluster().getInstance(SegmentReplicationTargetService.class, replicaNode); + ensureGreen(INDEX_NAME); + blockNodeOnAnyFiles(REPOSITORY_NAME, replicaNode); + final IndexShard indexShard = getIndexShard(replicaNode, INDEX_NAME); + indexSingleDoc(); + refresh(INDEX_NAME); + waitForBlock(replicaNode, REPOSITORY_NAME, TimeValue.timeValueSeconds(10)); + final SegmentReplicationState state = targetService.getOngoingEventSegmentReplicationState(indexShard.shardId()); + assertEquals(SegmentReplicationState.Stage.GET_CHECKPOINT_INFO, state.getStage()); + ReplicationCollection.ReplicationRef segmentReplicationTargetReplicationRef = targetService.get( + state.getReplicationId() + ); + final SegmentReplicationTarget segmentReplicationTarget = segmentReplicationTargetReplicationRef.get(); + // close the target ref here otherwise it will hold a refcount + segmentReplicationTargetReplicationRef.close(); + assertNotNull(segmentReplicationTarget); + assertTrue(segmentReplicationTarget.refCount() > 0); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + assertBusy(() -> { + assertTrue(indexShard.routingEntry().primary()); + assertNull(targetService.getOngoingEventSegmentReplicationState(indexShard.shardId())); + assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); + }); + unblockNode(REPOSITORY_NAME, replicaNode); + cleanupRepo(); + } + + private String getNode(Set dataNodeNames, boolean primary) { + assertEquals(2, dataNodeNames.size()); + for (String name : dataNodeNames) { + final IndexShard indexShard = getIndexShard(name, INDEX_NAME); + if (indexShard.routingEntry().primary() == primary) { + return name; + } + } + return null; + } + + private IndexShard getIndexShard(String node, String indexName) { + final Index index = resolveIndex(indexName); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); + IndexService indexService = indicesService.indexService(index); + assertNotNull(indexService); + final Optional shardId = indexService.shardIds().stream().findFirst(); + return shardId.map(indexService::getShard).orElse(null); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java index 7a2326b60c932..23864c35ad154 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java @@ -9,7 +9,6 @@ package org.opensearch.remotestore; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import 
org.opensearch.indices.replication.SegmentReplicationIT; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; @@ -18,60 +17,38 @@ import java.nio.file.Path; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; /** * This class runs Segment Replication Integ test suite with remote store enabled. - * Setup is similar to SegmentReplicationRemoteStoreIT but this also enables the segment replication using remote store which - * is behind SEGMENT_REPLICATION_EXPERIMENTAL flag. After this is moved out of experimental, we can combine and keep only one - * test suite for Segment and Remote store integration tests. */ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SegmentReplicationUsingRemoteStoreIT extends SegmentReplicationIT { private static final String REPOSITORY_NAME = "test-remote-store-repo"; + protected Path absolutePath; @Override protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(remoteStoreClusterSettings(REPOSITORY_NAME)).build(); + if (absolutePath == null) { + absolutePath = randomRepoPath().toAbsolutePath(); + } + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) + .build(); } protected boolean segmentReplicationWithRemoteEnabled() { return true; } - @Override - protected Settings featureFlagSettings() { - return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .build(); - } - @Before public void setup() { internalCluster().startClusterManagerOnlyNode(); - Path absolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) - ); } @After public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); - } - - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/7592") - @Override - public void testPressureServiceStats() throws Exception { - super.testPressureServiceStats(); - } - - @Override - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8059") - public void testDropPrimaryDuringReplication() throws Exception { - super.testDropPrimaryDuringReplication(); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java index 99927797d5fbc..6cfc76b7e3223 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java @@ -9,7 +9,6 @@ package org.opensearch.remotestore; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.SegmentReplicationPressureIT; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; @@ -18,17 +17,15 @@ import java.nio.file.Path; import static 
org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; /** * This class executes the SegmentReplicationPressureIT suite with remote store integration enabled. - * Setup is similar to SegmentReplicationPressureIT but this also enables the segment replication using remote store which - * is behind SEGMENT_REPLICATION_EXPERIMENTAL flag. */ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SegmentReplicationWithRemoteStorePressureIT extends SegmentReplicationPressureIT { private static final String REPOSITORY_NAME = "test-remote-store-repo"; + protected Path absolutePath; @Override protected boolean segmentReplicationWithRemoteEnabled() { @@ -36,30 +33,21 @@ protected boolean segmentReplicationWithRemoteEnabled() { } @Override - protected Settings featureFlagSettings() { + protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() - .put(super.featureFlagSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) .build(); } - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(remoteStoreClusterSettings(REPOSITORY_NAME)).build(); - } - @Before public void setup() { + absolutePath = randomRepoPath().toAbsolutePath(); internalCluster().startClusterManagerOnlyNode(); - Path absolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) - ); } @After public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartFileCorruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartFileCorruptionIT.java index f2d2e6c04d114..7112b266840ac 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartFileCorruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartFileCorruptionIT.java @@ -16,7 +16,6 @@ import org.opensearch.plugins.Plugin; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; -import org.junit.Before; import java.util.Collection; import java.util.stream.Collectors; @@ -31,11 +30,6 @@ protected Collection> nodePlugins() { return Stream.concat(super.nodePlugins().stream(), Stream.of(MockFsRepositoryPlugin.class)).collect(Collectors.toList()); } - @Before - public void setup() { - setupRepo(); - } - protected Settings remoteStoreIndexSettings() { return Settings.builder() .put(super.indexSettings()) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java index a523d5c0f5470..3dfde6f472525 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java @@ -8,31 +8,148 @@ package org.opensearch.remotestore.multipart; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.plugins.Plugin; import org.opensearch.remotestore.RemoteStoreIT; import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.junit.Before; import java.nio.file.Path; import java.util.Collection; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; public class RemoteStoreMultipartIT extends RemoteStoreIT { + Path repositoryLocation; + boolean compress; + boolean overrideBuildRepositoryMetadata; + @Override protected Collection> nodePlugins() { return Stream.concat(super.nodePlugins().stream(), Stream.of(MockFsRepositoryPlugin.class)).collect(Collectors.toList()); } @Override - protected void putRepository(Path path) { + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put( + remoteStoreClusterSettings( + REPOSITORY_NAME, + segmentRepoPath, + MockFsRepositoryPlugin.TYPE, + REPOSITORY_2_NAME, + translogRepoPath, + MockFsRepositoryPlugin.TYPE + ) + ) + .build(); + } + + @Before + public void setup() { + clusterSettingsSuppliedByTest = true; + overrideBuildRepositoryMetadata = false; + repositoryLocation = randomRepoPath(); + compress = randomBoolean(); + } + + @Override + public RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name) { + if (overrideBuildRepositoryMetadata) { + Map nodeAttributes = node.getAttributes(); + String type = nodeAttributes.get(String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, name)); + + String settingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + name + ); + Map settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> node.getAttributes().get(key))); + + Settings.Builder settings = Settings.builder(); + settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue())); + settings.put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true); + + if (name.equals(REPOSITORY_NAME)) { + settings.put("location", repositoryLocation) + .put("compress", compress) + .put("max_remote_upload_bytes_per_sec", "1kb") + .put("chunk_size", 100, ByteSizeUnit.BYTES); + return new RepositoryMetadata(name, MockFsRepositoryPlugin.TYPE, settings.build()); + } + + return new 
RepositoryMetadata(name, type, settings.build()); + } else { + return super.buildRepositoryMetadata(node, name); + } + + } + + public void testRateLimitedRemoteUploads() throws Exception { + clusterSettingsSuppliedByTest = true; + overrideBuildRepositoryMetadata = true; + Settings.Builder clusterSettings = Settings.builder() + .put(remoteStoreClusterSettings(REPOSITORY_NAME, repositoryLocation, REPOSITORY_2_NAME, repositoryLocation)); + clusterSettings.put( + String.format(Locale.getDefault(), "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, REPOSITORY_NAME), + MockFsRepositoryPlugin.TYPE + ); + internalCluster().startNode(clusterSettings.build()); + Client client = client(); + logger.info("--> updating repository"); assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME) + client.admin() + .cluster() + .preparePutRepository(REPOSITORY_NAME) .setType(MockFsRepositoryPlugin.TYPE) - .setSettings(Settings.builder().put("location", path)) + .setSettings( + Settings.builder() + .put("location", repositoryLocation) + .put("compress", compress) + .put("max_remote_upload_bytes_per_sec", "1kb") + .put("chunk_size", 100, ByteSizeUnit.BYTES) + ) ); + + createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 10; i++) { + index(INDEX_NAME, "_doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + // check if throttling is active + assertBusy(() -> { + long uploadPauseTime = 0L; + for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + uploadPauseTime += repositoriesService.repository(REPOSITORY_NAME).getRemoteUploadThrottleTimeInNanos(); + } + assertThat(uploadPauseTime, greaterThan(TimeValue.timeValueSeconds(randomIntBetween(5, 10)).nanos())); + }, 30, TimeUnit.SECONDS); + + assertThat(client.prepareSearch(INDEX_NAME).setSize(0).get().getHits().getTotalHits().value, equalTo(10L)); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsVerifyingBlobContainer.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java similarity index 74% rename from server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsVerifyingBlobContainer.java rename to server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java index d882220c9f4d7..36987ac2d4991 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsVerifyingBlobContainer.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java @@ -10,10 +10,11 @@ import org.apache.lucene.index.CorruptIndexException; import org.opensearch.common.StreamContext; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer; import org.opensearch.common.blobstore.fs.FsBlobContainer; import org.opensearch.common.blobstore.fs.FsBlobStore; +import org.opensearch.common.blobstore.stream.read.ReadContext; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.io.InputStreamContainer; import org.opensearch.core.action.ActionListener; @@ -24,17 +25,20 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import 
java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; -public class MockFsVerifyingBlobContainer extends FsBlobContainer implements VerifyingMultiStreamBlobContainer { +public class MockFsAsyncBlobContainer extends FsBlobContainer implements AsyncMultiStreamBlobContainer { private static final int TRANSFER_TIMEOUT_MILLIS = 30000; private final boolean triggerDataIntegrityFailure; - public MockFsVerifyingBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path, boolean triggerDataIntegrityFailure) { + public MockFsAsyncBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path, boolean triggerDataIntegrityFailure) { super(blobStore, blobPath, path); this.triggerDataIntegrityFailure = triggerDataIntegrityFailure; } @@ -114,6 +118,31 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener comp } + @Override + public void readBlobAsync(String blobName, ActionListener listener) { + new Thread(() -> { + try { + long contentLength = listBlobs().get(blobName).length(); + long partSize = contentLength / 10; + int numberOfParts = (int) ((contentLength % partSize) == 0 ? contentLength / partSize : (contentLength / partSize) + 1); + List blobPartStreams = new ArrayList<>(); + for (int partNumber = 0; partNumber < numberOfParts; partNumber++) { + long offset = partNumber * partSize; + InputStreamContainer blobPartStream = new InputStreamContainer(readBlob(blobName, offset, partSize), partSize, offset); + blobPartStreams.add(() -> CompletableFuture.completedFuture(blobPartStream)); + } + ReadContext blobReadContext = new ReadContext(contentLength, blobPartStreams, null); + listener.onResponse(blobReadContext); + } catch (Exception e) { + listener.onFailure(e); + } + }).start(); + } + + public boolean remoteIntegrityCheckSupported() { + return true; + } + private boolean isSegmentFile(String filename) { return !filename.endsWith(".tlog") && !filename.endsWith(".ckp"); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsBlobStore.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsBlobStore.java index f1d9fbba84528..77b0cac922014 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsBlobStore.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsBlobStore.java @@ -28,7 +28,7 @@ public MockFsBlobStore(int bufferSizeInBytes, Path path, boolean readonly, boole @Override public BlobContainer blobContainer(BlobPath path) { try { - return new MockFsVerifyingBlobContainer(this, path, buildAndCreate(path), triggerDataIntegrityFailure); + return new MockFsAsyncBlobContainer(this, path, buildAndCreate(path), triggerDataIntegrityFailure); } catch (IOException ex) { throw new OpenSearchException("failed to create blob container", ex); } diff --git a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java index f149d538cc47a..b8415f4b41815 100644 --- a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java @@ -108,4 +108,16 @@ public void testUpdateRepository() { final Repository 
updatedRepository = repositoriesService.repository(repositoryName); assertThat(updatedRepository, updated ? not(sameInstance(originalRepository)) : sameInstance(originalRepository)); } + + public void testSystemRepositoryCantBeCreated() { + internalCluster(); + final String repositoryName = "test-repo"; + final Client client = client(); + final Settings.Builder repoSettings = Settings.builder().put("system_repository", true).put("location", randomRepoPath()); + + assertThrows( + RepositoryException.class, + () -> client.admin().cluster().preparePutRepository(repositoryName).setType(FsRepository.TYPE).setSettings(repoSettings).get() + ); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java index e125492d4b2c5..2fbaf4ea5a4d3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/script/ScriptCacheIT.java @@ -8,8 +8,11 @@ package org.opensearch.script; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.rest.RestStatus; import org.opensearch.index.MockEngineFactoryPlugin; @@ -18,12 +21,13 @@ import org.opensearch.plugins.Plugin; import org.opensearch.search.MockSearchService; import org.opensearch.test.MockHttpTransport; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.TestGeoShapeFieldMapperPlugin; import org.opensearch.test.store.MockFSIndexStore; import org.opensearch.test.transport.MockTransportService; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -31,9 +35,26 @@ import java.util.concurrent.ExecutionException; import java.util.function.Function; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.apache.logging.log4j.core.util.Throwables.getRootCause; -public class ScriptCacheIT extends OpenSearchIntegTestCase { +public class ScriptCacheIT extends ParameterizedOpenSearchIntegTestCase { + public ScriptCacheIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java index ca0fb106c2d70..0975585e2984d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java @@ -16,6 +16,7 @@ import 
org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.get.MultiGetResponse; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.SearchPhaseName; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; @@ -56,9 +57,11 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.opensearch.action.search.TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED_KEY; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, minNumDataNodes = 3) @@ -74,6 +77,7 @@ public void testSearchWithWRRShardRouting() throws IOException { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone" + ".values", "a,b,c") .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone") .put("cluster.routing.weighted.fail_open", false) + .put(SEARCH_REQUEST_STATS_ENABLED_KEY, true) .build(); logger.info("--> starting 6 nodes on different zones"); @@ -180,12 +184,39 @@ public void testSearchWithWRRShardRouting() throws IOException { assertFalse(!hitNodes.contains(nodeId)); } nodeStats = client().admin().cluster().prepareNodesStats().execute().actionGet(); + int num = 0; + int coordNumber = 0; for (NodeStats stat : nodeStats.getNodes()) { SearchStats.Stats searchStats = stat.getIndices().getSearch().getTotal(); + if (searchStats.getRequestStatsLongHolder() + .getRequestStatsHolder() + .get(SearchPhaseName.QUERY.getName()) + .getTimeInMillis() > 0) { + assertThat( + searchStats.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.QUERY.getName()).getTotal(), + greaterThan(0L) + ); + assertThat( + searchStats.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTimeInMillis(), + greaterThan(0L) + ); + assertThat( + searchStats.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTotal(), + greaterThan(0L) + ); + assertThat( + searchStats.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.EXPAND.getName()).getTotal(), + greaterThan(0L) + ); + coordNumber += 1; + } Assert.assertTrue(searchStats.getQueryCount() > 0L); Assert.assertTrue(searchStats.getFetchCount() > 0L); + num++; } + assertThat(coordNumber, greaterThan(0)); + assertThat(num, greaterThan(0)); } private Map> setupCluster(int nodeCountPerAZ, Settings commonSettings) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchWithRejectionsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWithRejectionsIT.java index 87c6aa2202ff5..24c72a66da6d0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchWithRejectionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWithRejectionsIT.java @@ -32,20 +32,45 @@ package org.opensearch.search; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.action.search.SearchResponse; import 
org.opensearch.action.search.SearchType; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import java.util.Arrays; +import java.util.Collection; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class SearchWithRejectionsIT extends OpenSearchIntegTestCase { +public class SearchWithRejectionsIT extends ParameterizedOpenSearchIntegTestCase { + + public SearchWithRejectionsIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java b/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java index 42e515cca9b6b..a61102b9db144 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/StressSearchServiceReaperIT.java @@ -31,23 +31,45 @@ package org.opensearch.search; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.English; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import java.util.Arrays; +import java.util.Collection; import java.util.concurrent.ExecutionException; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.OpenSearchIntegTestCase.Scope.SUITE; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @ClusterScope(scope = SUITE) -public class StressSearchServiceReaperIT extends OpenSearchIntegTestCase { +public class StressSearchServiceReaperIT extends ParameterizedOpenSearchIntegTestCase { + public StressSearchServiceReaperIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { 
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }

     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java
index b73b7722f9728..257786c1e9ce5 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java
@@ -32,11 +32,15 @@
 package org.opensearch.search.aggregations;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.OpenSearchException;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchPhaseExecutionException;
 import org.opensearch.action.search.SearchResponse;
+import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.search.aggregations.bucket.terms.IncludeExclude;
 import org.opensearch.search.aggregations.bucket.terms.RareTermsAggregationBuilder;
 import org.opensearch.search.aggregations.bucket.terms.SignificantTermsAggregationBuilder;
@@ -45,16 +49,20 @@
 import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
 import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;

 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;

+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.aggregations.AggregationBuilders.terms;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;

 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class AggregationsIntegrationIT extends OpenSearchIntegTestCase {
+public class AggregationsIntegrationIT extends ParameterizedOpenSearchIntegTestCase {

     static int numDocs;

@@ -63,6 +71,23 @@ public class AggregationsIntegrationIT extends OpenSearchIntegTestCase {
         + LARGE_STRING.length()
         + "] used in the request has exceeded the allowed maximum";

+    public AggregationsIntegrationIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }
+
     @Override
     public void setupSuiteScopeCluster() throws Exception {
         assertAcked(prepareCreate("index").setMapping("f", "type=keyword").get());
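Note: every test class touched by the remaining hunks is converted the same way. Distilled into one hedged sketch (the class name here is hypothetical; the base class, annotation, setting key, and feature flag are the ones the hunks use), the pattern is:

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.FeatureFlags;
import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;

import java.util.Arrays;
import java.util.Collection;

import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;

// Sketch of the conversion: run the whole suite twice, once with concurrent
// segment search disabled and once with it enabled.
public class ExampleParameterizedIT extends ParameterizedOpenSearchIntegTestCase {

    public ExampleParameterizedIT(Settings dynamicSettings) {
        // passed to the base class, which applies them for this parameter combination
        super(dynamicSettings);
    }

    @ParametersFactory
    public static Collection<Object[]> parameters() {
        return Arrays.asList(
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
        );
    }

    @Override
    protected Settings featureFlagSettings() {
        // the dynamic setting only exists behind this feature flag
        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
    }
}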
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java
index d35a560b0986c..3d3cf1943dfe3 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java
@@ -32,20 +32,27 @@
 package org.opensearch.search.aggregations;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.opensearch.search.aggregations.bucket.histogram.Histogram;
 import org.opensearch.search.aggregations.bucket.missing.Missing;
 import org.opensearch.search.aggregations.bucket.terms.Terms;
-import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
 import org.hamcrest.Matchers;

+import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;

 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.aggregations.AggregationBuilders.histogram;
 import static org.opensearch.search.aggregations.AggregationBuilders.missing;
 import static org.opensearch.search.aggregations.AggregationBuilders.terms;
@@ -54,7 +61,24 @@
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.core.IsNull.notNullValue;

-public class CombiIT extends OpenSearchIntegTestCase {
+public class CombiIT extends ParameterizedOpenSearchIntegTestCase {
+
+    public CombiIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }

     /**
      * Making sure that if there are multiple aggregations, working on the same field, yet require different
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java
index 21f833d5430db..2ffdf5fb32778 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java
@@ -32,10 +32,14 @@
 package org.opensearch.search.aggregations;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.action.support.IndicesOptions;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.query.QueryBuilders;
 import org.opensearch.index.query.RangeQueryBuilder;
@@ -52,11 +56,12 @@
 import org.opensearch.search.aggregations.bucket.terms.Terms;
 import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory;
 import org.opensearch.search.aggregations.metrics.Sum;
-import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
 import org.junit.After;
 import org.junit.Before;

 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -68,6 +73,7 @@
 import java.util.function.Function;

 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.aggregations.AggregationBuilders.extendedStats;
 import static org.opensearch.search.aggregations.AggregationBuilders.filter;
 import static org.opensearch.search.aggregations.AggregationBuilders.histogram;
@@ -88,7 +94,24 @@
  * Additional tests that aim at testing more complex aggregation trees on larger random datasets, so that things like
  * the growth of dynamic arrays is tested.
  */
-public class EquivalenceIT extends OpenSearchIntegTestCase {
+public class EquivalenceIT extends ParameterizedOpenSearchIntegTestCase {
+
+    public EquivalenceIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }

     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java
index f210af7c10fb3..1bc0cb36f5fe3 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java
@@ -32,25 +32,49 @@
 package org.opensearch.search.aggregations;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.search.aggregations.bucket.terms.Terms;
 import org.opensearch.search.aggregations.metrics.Sum;
 import org.opensearch.search.aggregations.pipeline.InternalBucketMetricValue;
-import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;

+import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;

 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.aggregations.AggregationBuilders.sum;
 import static org.opensearch.search.aggregations.AggregationBuilders.terms;
 import static org.opensearch.search.aggregations.PipelineAggregatorBuilders.maxBucket;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;

-public class MetadataIT extends OpenSearchIntegTestCase {
+public class MetadataIT extends ParameterizedOpenSearchIntegTestCase {
+
+    public MetadataIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }

     public void testMetadataSetOnAggregationResult() throws Exception {
         assertAcked(client().admin().indices().prepareCreate("idx").setMapping("name", "type=keyword").get());
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java
index 26bfe59618275..e6325987d330f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java
@@ -32,8 +32,12 @@
 package org.opensearch.search.aggregations;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.geo.GeoPoint;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval;
 import org.opensearch.search.aggregations.bucket.histogram.Histogram;
 import org.opensearch.search.aggregations.bucket.terms.Terms;
@@ -43,7 +47,12 @@
 import org.opensearch.search.aggregations.metrics.Percentiles;
 import org.opensearch.search.aggregations.metrics.Stats;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+
+import java.util.Arrays;
+import java.util.Collection;

+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.aggregations.AggregationBuilders.cardinality;
 import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram;
 import static org.opensearch.search.aggregations.AggregationBuilders.geoCentroid;
@@ -56,7 +65,24 @@
 import static org.hamcrest.Matchers.closeTo;

 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class MissingValueIT extends OpenSearchIntegTestCase {
+public class MissingValueIT extends ParameterizedOpenSearchIntegTestCase {
+
+    public MissingValueIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }

     @Override
     protected int maximumNumberOfShards() {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/AdjacencyMatrixIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/AdjacencyMatrixIT.java
index 011ebf8add92a..cd0922606ec99 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/AdjacencyMatrixIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/AdjacencyMatrixIT.java
@@ -32,11 +32,14 @@
 package org.opensearch.search.aggregations.bucket;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.OpenSearchException;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchPhaseExecutionException;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.query.BoolQueryBuilder;
@@ -47,15 +50,19 @@
 import org.opensearch.search.aggregations.bucket.histogram.Histogram;
 import org.opensearch.search.aggregations.metrics.Avg;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
 import org.hamcrest.Matchers;

 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;

 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
 import static org.opensearch.index.query.QueryBuilders.termQuery;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.aggregations.AggregationBuilders.adjacencyMatrix;
 import static org.opensearch.search.aggregations.AggregationBuilders.avg;
 import static org.opensearch.search.aggregations.AggregationBuilders.histogram;
@@ -68,11 +75,28 @@
 import static org.hamcrest.core.IsNull.notNullValue;

 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class AdjacencyMatrixIT extends OpenSearchIntegTestCase {
+public class AdjacencyMatrixIT extends ParameterizedOpenSearchIntegTestCase {

     static int numDocs, numSingleTag1Docs, numSingleTag2Docs, numTag1Docs, numTag2Docs, numMultiTagDocs;
     static final int MAX_NUM_FILTERS = 3;

+    public AdjacencyMatrixIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }
+
     @Override
     public void setupSuiteScopeCluster() throws Exception {
         createIndex("idx");
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java
index fc5407c4cade8..7ab1a44ce220c 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/BooleanTermsIT.java
@@ -31,26 +31,52 @@
 package org.opensearch.search.aggregations.bucket;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.opensearch.search.aggregations.bucket.terms.Terms;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+
+import java.util.Arrays;
+import java.util.Collection;

 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.aggregations.AggregationBuilders.terms;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.core.IsNull.notNullValue;

 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class BooleanTermsIT extends OpenSearchIntegTestCase {
+public class BooleanTermsIT extends ParameterizedOpenSearchIntegTestCase {

     private static final String SINGLE_VALUED_FIELD_NAME = "b_value";
     private static final String MULTI_VALUED_FIELD_NAME = "b_values";

     static int numSingleTrues, numSingleFalses, numMultiTrues, numMultiFalses;

+    public BooleanTermsIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }
+
     @Override
     public void setupSuiteScopeCluster() throws Exception {
         createIndex("idx");
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java
index ec7278f74e8af..ee94e574228df 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramIT.java
@@ -31,6 +31,8 @@
 package org.opensearch.search.aggregations.bucket;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.OpenSearchException;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchPhaseExecutionException;
@@ -39,6 +41,7 @@
 import org.opensearch.common.time.DateFormatter;
 import org.opensearch.common.time.DateFormatters;
 import org.opensearch.common.time.DateMathParser;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.index.mapper.DateFieldMapper;
 import org.opensearch.index.query.MatchNoneQueryBuilder;
 import org.opensearch.index.query.QueryBuilders;
@@ -56,6 +59,7 @@ import org.opensearch.search.aggregations.metrics.Avg;
 import org.opensearch.search.aggregations.metrics.Sum;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
 import org.hamcrest.Matchers;
 import org.junit.After;
@@ -78,6 +82,7 @@
 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.aggregations.AggregationBuilders.avg;
 import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram;
 import static org.opensearch.search.aggregations.AggregationBuilders.histogram;
@@ -93,7 +98,7 @@
 import static org.hamcrest.core.IsNull.notNullValue;

 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class DateHistogramIT extends OpenSearchIntegTestCase {
+public class DateHistogramIT extends ParameterizedOpenSearchIntegTestCase {

     static Map<ZonedDateTime, Map<String, Object>> expectedMultiSortBuckets;

@@ -101,8 +106,25 @@ private ZonedDateTime date(int month, int day) {
         return ZonedDateTime.of(2012, month, day, 0, 0, 0, 0, ZoneOffset.UTC);
     }

+    public DateHistogramIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }
+
     private ZonedDateTime date(String date) {
-        return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date));
+        return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date));
     }

     private static String format(ZonedDateTime date, String pattern) {
@@ -1459,7 +1481,7 @@ public void testExceptionOnNegativeInterval() {
     /**
      * https://github.com/elastic/elasticsearch/issues/31760 shows an edge case where an unmapped "date" field in two indices
      * that are queried simultaneously can lead to the "format" parameter in the aggregation not being preserved correctly.
-     *
+     * <p>
      * The error happens when the bucket from the "unmapped" index is received first in the reduce phase, however the case can
      * be recreated when aggregating about a single index with an unmapped date field and also getting "empty" buckets.
      */
@@ -1602,8 +1624,8 @@ public void testScriptCaching() throws Exception {
                 .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
                 .get()
         );
-        String date = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(1, 1));
-        String date2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(2, 1));
+        String date = DateFieldMapper.getDefaultDateTimeFormatter().format(date(1, 1));
+        String date2 = DateFieldMapper.getDefaultDateTimeFormatter().format(date(2, 1));
         indexRandom(
             true,
             client().prepareIndex("cache_test_idx").setId("1").setSource("d", date),
@@ -1733,6 +1755,7 @@ public void testScriptCaching() throws Exception {
                 .getMissCount(),
             equalTo(2L)
         );
+        cluster().wipeIndices("cache_test_idx");
     }

     public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyDesc() throws Exception {
@@ -1850,6 +1873,7 @@ public void testDateNanosHistogram() throws Exception {
         assertEquals(1, buckets.get(0).getDocCount());
         assertEquals(946771200000L, ((ZonedDateTime) buckets.get(1).getKey()).toEpochSecond() * 1000);
         assertEquals(1, buckets.get(1).getDocCount());
+        cluster().wipeIndices("nanos");
     }

     public void testDateKeyFormatting() {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java
index 19e5bdb8916b8..d44071e1ef9c5 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java
@@ -31,24 +31,32 @@
 package org.opensearch.search.aggregations.bucket;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
+import org.opensearch.common.settings.Settings;
 import org.opensearch.common.time.DateFormatter;
 import org.opensearch.common.time.DateFormatters;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.index.mapper.DateFieldMapper;
 import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval;
 import org.opensearch.search.aggregations.bucket.histogram.Histogram;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
 import org.junit.After;
 import org.junit.Before;

 import java.io.IOException;
 import java.time.ZoneOffset;
 import java.time.ZonedDateTime;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;

 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.core.IsNull.notNullValue;
@@ -61,13 +69,30 @@
  */
 @OpenSearchIntegTestCase.SuiteScopeTestCase
 @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE)
-public class DateHistogramOffsetIT extends OpenSearchIntegTestCase {
+public class DateHistogramOffsetIT extends ParameterizedOpenSearchIntegTestCase {

     private static final String DATE_FORMAT = "yyyy-MM-dd:hh-mm-ss";
     private static final DateFormatter FORMATTER = DateFormatter.forPattern(DATE_FORMAT);

+    public DateHistogramOffsetIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }
+
     private ZonedDateTime date(String date) {
-        return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date));
+        return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date));
     }

     @Before
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateRangeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateRangeIT.java
index 470ee6a4d2cea..ae4243019ffb1 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateRangeIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateRangeIT.java
@@ -31,11 +31,14 @@
 package org.opensearch.search.aggregations.bucket;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.OpenSearchException;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchPhaseExecutionException;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.script.Script;
 import org.opensearch.script.ScriptType;
@@ -46,6 +49,7 @@
 import org.opensearch.search.aggregations.bucket.range.Range.Bucket;
 import org.opensearch.search.aggregations.metrics.Sum;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
 import org.hamcrest.Matchers;

 import java.time.ZoneId;
@@ -63,6 +67,7 @@
 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.aggregations.AggregationBuilders.dateRange;
 import static org.opensearch.search.aggregations.AggregationBuilders.histogram;
 import static org.opensearch.search.aggregations.AggregationBuilders.sum;
@@ -76,7 +81,24 @@
 import static org.hamcrest.core.IsNull.nullValue;

 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class DateRangeIT extends OpenSearchIntegTestCase {
+public class DateRangeIT extends ParameterizedOpenSearchIntegTestCase {
+
+    public DateRangeIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }

     private static IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception {
         return client().prepareIndex("idx")
@@ -1062,6 +1084,7 @@ public void testScriptCaching() throws Exception {
                 .getMissCount(),
             equalTo(2L)
         );
+        internalCluster().wipeIndices("cache_test_idx");
     }

     /**
@@ -1124,6 +1147,7 @@ public void testRangeWithFormatStringValue() throws Exception {
                 .get()
         );
         assertThat(e.getDetailedMessage(), containsString("failed to parse date field [1000000] with format [strict_hour_minute_second]"));
+        internalCluster().wipeIndices(indexName);
     }

     /**
@@ -1196,6 +1220,7 @@ public void testRangeWithFormatNumericValue() throws Exception {
         buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2);
         assertBucket(buckets.get(0), 2L, "1000000-3000000", 1000000L, 3000000L);
         assertBucket(buckets.get(1), 1L, "3000000-4000000", 3000000L, 4000000L);
+        internalCluster().wipeIndices(indexName);
     }

     private static List<Range.Bucket> checkBuckets(Range dateRange, String expectedAggName, long expectedBucketsSize) {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java
index 0d133a933df1f..5e95073209c71 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java
@@ -31,10 +31,13 @@
 package org.opensearch.search.aggregations.bucket;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.action.admin.indices.refresh.RefreshRequest;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.action.search.SearchType;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.index.query.TermQueryBuilder;
 import org.opensearch.search.aggregations.BucketOrder;
 import org.opensearch.search.aggregations.bucket.sampler.DiversifiedAggregationBuilder;
@@ -45,12 +48,15 @@
 import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
 import org.opensearch.search.aggregations.metrics.Max;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;

+import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;

 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.aggregations.AggregationBuilders.max;
 import static org.opensearch.search.aggregations.AggregationBuilders.sampler;
 import static org.opensearch.search.aggregations.AggregationBuilders.terms;
@@ -65,10 +71,27 @@
  * Tests the Sampler aggregation
  */
 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class DiversifiedSamplerIT extends OpenSearchIntegTestCase {
+public class DiversifiedSamplerIT extends ParameterizedOpenSearchIntegTestCase {

     public static final int NUM_SHARDS = 2;

+    public DiversifiedSamplerIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }
+
     public String randomExecutionHint() {
         return randomBoolean() ? null : randomFrom(SamplerAggregator.ExecutionMode.values()).toString();
     }
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java
index b740271cdef77..88bb41923e53f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DoubleTermsIT.java
@@ -88,6 +88,10 @@
 @OpenSearchIntegTestCase.SuiteScopeTestCase
 public class DoubleTermsIT extends AbstractTermsTestCase {

+    public DoubleTermsIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Collections.singleton(CustomScriptPlugin.class);
@@ -1106,5 +1110,6 @@ public void testScriptCaching() throws Exception {
                 .getMissCount(),
             equalTo(2L)
         );
+        internalCluster().wipeIndices("cache_test_idx");
     }
 }
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterIT.java
index ef455bf353ce4..7aa98803403e0 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FilterIT.java
@@ -31,9 +31,13 @@
 package org.opensearch.search.aggregations.bucket;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.OpenSearchException;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.query.BoolQueryBuilder;
 import org.opensearch.index.query.QueryBuilder;
@@ -42,14 +46,18 @@
 import org.opensearch.search.aggregations.bucket.histogram.Histogram;
 import org.opensearch.search.aggregations.metrics.Avg;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
 import org.hamcrest.Matchers;

 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;

 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
 import static org.opensearch.index.query.QueryBuilders.termQuery;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.aggregations.AggregationBuilders.avg;
 import static org.opensearch.search.aggregations.AggregationBuilders.filter;
 import static org.opensearch.search.aggregations.AggregationBuilders.histogram;
@@ -60,10 +68,27 @@ import static org.hamcrest.core.IsNull.notNullValue;

 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class FilterIT extends OpenSearchIntegTestCase {
+public class FilterIT extends ParameterizedOpenSearchIntegTestCase {

     static int numDocs, numTag1Docs;

+    public FilterIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }
+
     @Override
     public void setupSuiteScopeCluster() throws Exception {
         createIndex("idx");
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FiltersIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FiltersIT.java
index 4c5033b957d00..b6cf515df78ba 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FiltersIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/FiltersIT.java
@@ -32,9 +32,13 @@
 package org.opensearch.search.aggregations.bucket;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.OpenSearchException;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.query.BoolQueryBuilder;
 import org.opensearch.index.query.QueryBuilder;
@@ -44,6 +48,7 @@
 import org.opensearch.search.aggregations.bucket.histogram.Histogram;
 import org.opensearch.search.aggregations.metrics.Avg;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
 import org.hamcrest.Matchers;

 import java.util.ArrayList;
@@ -56,6 +61,7 @@
 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
 import static org.opensearch.index.query.QueryBuilders.termQuery;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.aggregations.AggregationBuilders.avg;
 import static org.opensearch.search.aggregations.AggregationBuilders.filters;
 import static org.opensearch.search.aggregations.AggregationBuilders.histogram;
@@ -66,10 +72,27 @@
 import static org.hamcrest.core.IsNull.notNullValue;

 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class FiltersIT extends OpenSearchIntegTestCase {
+public class FiltersIT extends ParameterizedOpenSearchIntegTestCase {

     static int numDocs, numTag1Docs, numTag2Docs, numOtherDocs;

+    public FiltersIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }
+
     @Override
     public void setupSuiteScopeCluster() throws Exception {
         createIndex("idx");
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java
index a345d6e3ceb3b..bfacbc1c17a3c 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java
@@ -31,6 +31,8 @@
 package org.opensearch.search.aggregations.bucket;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.Version;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchPhaseExecutionException;
@@ -39,6 +41,7 @@
 import org.opensearch.common.geo.GeoPoint;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.DistanceUnit;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode;
 import org.opensearch.search.aggregations.InternalAggregation;
@@ -47,17 +50,20 @@
 import org.opensearch.search.aggregations.bucket.range.Range.Bucket;
 import org.opensearch.search.aggregations.bucket.terms.Terms;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
 import org.opensearch.test.VersionUtils;
 import org.hamcrest.Matchers;

 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;

 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.aggregations.AggregationBuilders.geoDistance;
 import static org.opensearch.search.aggregations.AggregationBuilders.histogram;
 import static org.opensearch.search.aggregations.AggregationBuilders.terms;
@@ -70,7 +76,11 @@
 import static org.hamcrest.core.IsNull.nullValue;

 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class GeoDistanceIT extends OpenSearchIntegTestCase {
+public class GeoDistanceIT extends ParameterizedOpenSearchIntegTestCase {
+
+    public GeoDistanceIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }

     @Override
     protected boolean forbidPrivateIndexSettings() {
@@ -79,6 +89,19 @@ protected boolean forbidPrivateIndexSettings() {

     private Version version = VersionUtils.randomIndexCompatibleVersion(random());

+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }
+
     private IndexRequestBuilder indexCity(String idx, String name, String... latLons) throws Exception {
         XContentBuilder source = jsonBuilder().startObject().field("city", name);
         source.startArray("location");
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GlobalIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GlobalIT.java
index 8a97d9c9e75dd..be31a3afadad0 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GlobalIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GlobalIT.java
@@ -31,19 +31,27 @@
 package org.opensearch.search.aggregations.bucket;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.OpenSearchException;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.index.query.QueryBuilders;
 import org.opensearch.search.aggregations.InternalAggregation;
 import org.opensearch.search.aggregations.bucket.global.Global;
 import org.opensearch.search.aggregations.metrics.Stats;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;

 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.List;

 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.aggregations.AggregationBuilders.global;
 import static org.opensearch.search.aggregations.AggregationBuilders.stats;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;
@@ -53,10 +61,27 @@
 import static org.hamcrest.core.IsNull.notNullValue;

 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class GlobalIT extends OpenSearchIntegTestCase {
+public class GlobalIT extends ParameterizedOpenSearchIntegTestCase {

     static int numDocs;

+    public GlobalIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }
+
     @Override
     public void setupSuiteScopeCluster() throws Exception {
         createIndex("idx");
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java
index 6d5918ffa7f0d..75f57d1cc4c0e 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/HistogramIT.java
@@ -31,11 +31,14 @@
 package org.opensearch.search.aggregations.bucket;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.OpenSearchException;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchPhaseExecutionException;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.index.query.QueryBuilders;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.script.MockScriptPlugin;
@@ -53,10 +56,12 @@
 import org.opensearch.search.aggregations.metrics.Stats;
 import org.opensearch.search.aggregations.metrics.Sum;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
 import org.hamcrest.Matchers;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -69,6 +74,7 @@
 import static java.util.Collections.emptyMap;
 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.search.aggregations.AggregationBuilders.avg;
 import static org.opensearch.search.aggregations.AggregationBuilders.filter;
 import static org.opensearch.search.aggregations.AggregationBuilders.histogram;
@@ -85,7 +91,7 @@
 import static org.hamcrest.core.IsNull.notNullValue;

 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class HistogramIT extends OpenSearchIntegTestCase {
+public class HistogramIT extends ParameterizedOpenSearchIntegTestCase {

     private static final String SINGLE_VALUED_FIELD_NAME = "l_value";
     private static final String MULTI_VALUED_FIELD_NAME = "l_values";
@@ -96,6 +102,23 @@ public class HistogramIT extends OpenSearchIntegTestCase {
     static long[] valueCounts, valuesCounts;
     static Map<Long, Map<String, Object>> expectedMultiSortBuckets;

+    public HistogramIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }
+
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Collections.singleton(CustomScriptPlugin.class);
@@ -1144,6 +1167,7 @@ public void testDecimalIntervalAndOffset() throws Exception {
         assertEquals(1, buckets.get(0).getDocCount());
         assertEquals(0.05, (double) buckets.get(1).getKey(), 0.01d);
         assertEquals(1, buckets.get(1).getDocCount());
+        internalCluster().wipeIndices("decimal_values");
     }

     /**
@@ -1285,6 +1309,7 @@ public void testScriptCaching() throws Exception {
                 .getMissCount(),
             equalTo(2L)
         );
+        internalCluster().wipeIndices("cache_test_idx");
     }

     public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAndKeyDesc() throws Exception {
@@ -1388,6 +1413,7 @@ public void testHardBounds() throws Exception {
         buckets = histogram.getBuckets();
         assertEquals(1, buckets.size());
         assertEquals(0.1, (double) buckets.get(0).getKey(), 0.01d);
+        internalCluster().wipeIndices("test");
     }

diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpRangeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpRangeIT.java
index f8f666aaa3c1b..14a3685bd183e 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpRangeIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpRangeIT.java
@@ -31,9 +31,13 @@
 package org.opensearch.search.aggregations.bucket;

+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.opensearch.action.search.SearchPhaseExecutionException;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.cluster.health.ClusterHealthStatus;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.script.MockScriptPlugin;
 import org.opensearch.script.Script;
@@ -41,6 +45,7 @@
 import org.opensearch.search.aggregations.AggregationBuilders;
 import org.opensearch.search.aggregations.bucket.range.Range;
 import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;

 import java.util.Arrays;
 import java.util.Collection;
@@ -48,13 +53,31 @@
 import java.util.Map;
 import java.util.function.Function;

+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.instanceOf;

 @OpenSearchIntegTestCase.SuiteScopeTestCase
-public class IpRangeIT extends OpenSearchIntegTestCase {
+public class IpRangeIT extends ParameterizedOpenSearchIntegTestCase {
+
+    public IpRangeIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }

     public static class DummyScriptPlugin extends MockScriptPlugin {
         @Override
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpTermsIT.java
index cff51e74fdbd0..c712c97af5c71 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpTermsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/IpTermsIT.java
@@ -32,6 +32,7 @@
 package org.opensearch.search.aggregations.bucket;

 import org.opensearch.action.search.SearchResponse;
+import org.opensearch.common.settings.Settings;
 import org.opensearch.index.fielddata.ScriptDocValues;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.script.Script;
@@ -50,6 +51,10 @@

 public class IpTermsIT extends AbstractTermsTestCase {

+    public IpTermsIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Collections.singleton(CustomScriptPlugin.class);
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java
index a03c7c4005959..41cf0529f78f5 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java
@@ -86,6 +86,10 @@
 @OpenSearchIntegTestCase.SuiteScopeTestCase
 public class LongTermsIT extends AbstractTermsTestCase {

+    public LongTermsIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Collections.singleton(CustomScriptPlugin.class);
@@ -1054,5 +1058,6 @@ public void testScriptCaching() throws Exception {
                 .getMissCount(),
             equalTo(2L)
         );
+        internalCluster().wipeIndices("cache_test_idx");
     }
 }
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java
index 4c5d9fb60d4f7..90dafc0d57887 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MinDocCountIT.java
@@ -37,6 +37,7 @@
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchRequest;
 import org.opensearch.action.search.SearchResponse;
+import org.opensearch.common.settings.Settings;
 import org.opensearch.common.time.DateFormatter;
 import org.opensearch.index.fielddata.ScriptDocValues;
 import org.opensearch.index.query.QueryBuilder;
@@ -81,6 +82,10 @@ public class MinDocCountIT extends AbstractTermsTestCase {
     private static final QueryBuilder QUERY = QueryBuilders.termQuery("match", true);
     private static int cardinality;

+    public MinDocCountIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Collections.singleton(CustomScriptPlugin.class);
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java
index 0b32e30da72cf..8d79f581a0ab4 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/MultiTermsIT.java
@@ -9,6 +9,7 @@
 package org.opensearch.search.aggregations.bucket;

 import org.opensearch.action.search.SearchResponse;
+import org.opensearch.common.settings.Settings;
 import org.opensearch.script.Script;
 import org.opensearch.script.ScriptType;
 import org.opensearch.search.aggregations.bucket.terms.BaseStringTermsTestCase;
@@ -33,6 +34,10 @@
 @OpenSearchIntegTestCase.SuiteScopeTestCase
 public class MultiTermsIT extends BaseStringTermsTestCase {

+    public MultiTermsIT(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
     // the main purpose of this test is to make sure we're not allocating 2GB of memory per shard
     public void testSizeIsZero() {
         final int minDocCount = randomInt(1);
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java
index 3b3f169f7578b..1ef2c0e8db8c7 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NaNSortingIT.java
@@ -32,8 +32,12 @@
 package org.opensearch.search.aggregations.bucket;
org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.common.util.Comparators; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; @@ -47,8 +51,13 @@ import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.extendedStats; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; @@ -58,7 +67,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class NaNSortingIT extends OpenSearchIntegTestCase { +public class NaNSortingIT extends ParameterizedOpenSearchIntegTestCase { private enum SubAggregation { AVG("avg") { @@ -130,6 +139,23 @@ public String sortKey() { public abstract double getValue(Aggregation aggregation); } + public NaNSortingIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("string_value", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java index 7efb16c8b719c..b3009ffcf4f45 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/NestedIT.java @@ -31,12 +31,15 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.join.ScoreMode; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -54,9 +57,12 @@ import 
org.opensearch.search.aggregations.metrics.Stats; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; @@ -66,6 +72,7 @@ import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.nestedQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.max; @@ -85,12 +92,29 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class NestedIT extends OpenSearchIntegTestCase { +public class NestedIT extends ParameterizedOpenSearchIntegTestCase { private static int numParents; private static int[] numChildren; private static SubAggCollectionMode aggCollectionMode; + public NestedIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void setupSuiteScopeCluster() throws Exception { @@ -532,6 +556,7 @@ public void testParentFilterResolvedCorrectly() throws Exception { assertThat(nestedTags.getDocCount(), equalTo(0L)); // This must be 0 tags = nestedTags.getAggregations().get("tag"); assertThat(tags.getBuckets().size(), equalTo(0)); // and this must be empty + internalCluster().wipeIndices("idx2"); } public void testNestedSameDocIdProcessedMultipleTime() throws Exception { @@ -638,6 +663,7 @@ public void testNestedSameDocIdProcessedMultipleTime() throws Exception { assertThat(propertyId.getBucketByKey("1").getDocCount(), equalTo(1L)); assertThat(propertyId.getBucketByKey("2").getDocCount(), equalTo(1L)); assertThat(propertyId.getBucketByKey("3").getDocCount(), equalTo(1L)); + internalCluster().wipeIndices("idx4"); } public void testFilterAggInsideNestedAgg() throws Exception { @@ -800,6 +826,7 @@ public void testFilterAggInsideNestedAgg() throws Exception { assertThat(bucket.getDocCount(), equalTo(1L)); numStringParams = bucket.getAggregations().get("num_string_params"); assertThat(numStringParams.getDocCount(), equalTo(0L)); + internalCluster().wipeIndices("classes"); } public void testExtractInnerHitBuildersWithDuplicateHitName() throws Exception { @@ -824,6 +851,7 @@ public void testExtractInnerHitBuildersWithDuplicateHitName() throws Exception { RestStatus.BAD_REQUEST, containsString("[inner_hits] already contains an entry for key [ih1]") ); + internalCluster().wipeIndices("idxduplicatehitnames"); } public void testExtractInnerHitBuildersWithDuplicatePath() throws Exception { @@ -846,5 +874,6 @@ 
public void testExtractInnerHitBuildersWithDuplicatePath() throws Exception { RestStatus.BAD_REQUEST, containsString("[inner_hits] already contains an entry for key [property]") ); + internalCluster().wipeIndices("idxnullhitnames"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java index c46d6dcd847e1..64ab6f1382ac3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/RangeIT.java @@ -31,10 +31,13 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.plugins.Plugin; import org.opensearch.script.Script; @@ -48,9 +51,11 @@ import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -60,6 +65,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.range; import static org.opensearch.search.aggregations.AggregationBuilders.sum; @@ -73,13 +79,30 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class RangeIT extends OpenSearchIntegTestCase { +public class RangeIT extends ParameterizedOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; private static final String MULTI_VALUED_FIELD_NAME = "l_values"; static int numDocs; + public RangeIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Collection> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); @@ -1061,6 +1084,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } public void testFieldAlias() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ReverseNestedIT.java 
b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ReverseNestedIT.java index 749f2170dab50..2716db6b7e745 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ReverseNestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ReverseNestedIT.java @@ -31,9 +31,12 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.BucketOrder; @@ -44,9 +47,11 @@ import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.metrics.ValueCount; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; @@ -55,6 +60,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.count; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.nested; @@ -70,7 +76,24 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ReverseNestedIT extends OpenSearchIntegTestCase { +public class ReverseNestedIT extends ParameterizedOpenSearchIntegTestCase { + + public ReverseNestedIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override public void setupSuiteScopeCluster() throws Exception { @@ -726,6 +749,7 @@ public void testSameParentDocHavingMultipleBuckets() throws Exception { ValueCount barCount = reverseToBar.getAggregations().get("sku_count"); assertThat(barCount.getValue(), equalTo(2L)); } + internalCluster().wipeIndices("idx3"); } public void testFieldAlias() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java index 587bf2a707710..7033c42c5d661 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SamplerIT.java @@ -32,10 +32,13 @@ 
package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.refresh.RefreshRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.sampler.Sampler; @@ -45,11 +48,15 @@ import org.opensearch.search.aggregations.bucket.terms.Terms.Bucket; import org.opensearch.search.aggregations.metrics.Max; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.max; import static org.opensearch.search.aggregations.AggregationBuilders.sampler; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -64,7 +71,7 @@ * Tests the Sampler aggregation */ @OpenSearchIntegTestCase.SuiteScopeTestCase -public class SamplerIT extends OpenSearchIntegTestCase { +public class SamplerIT extends ParameterizedOpenSearchIntegTestCase { public static final int NUM_SHARDS = 2; @@ -72,6 +79,23 @@ public String randomExecutionHint() { return randomBoolean() ? null : randomFrom(SamplerAggregator.ExecutionMode.values()).toString(); } + public SamplerIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java index faa6a54394b00..66d761c56634e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardReduceIT.java @@ -31,8 +31,12 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.geometry.utils.Geohash; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; @@ -45,8 +49,13 @@ import org.opensearch.search.aggregations.bucket.range.Range; import 
org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.dateRange; import static org.opensearch.search.aggregations.AggregationBuilders.filter; @@ -68,7 +77,24 @@ * we can make sure that the reduce is properly propagated by checking that empty buckets were created. */ @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ShardReduceIT extends OpenSearchIntegTestCase { +public class ShardReduceIT extends ParameterizedOpenSearchIntegTestCase { + + public ShardReduceIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } private IndexRequestBuilder indexDoc(String date, int value) throws Exception { return client().prepareIndex("idx") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java index c89a694271703..145830f02ee56 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations.bucket; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.terms.Terms; @@ -45,6 +46,11 @@ import static org.hamcrest.Matchers.equalTo; public class ShardSizeTermsIT extends ShardSizeTestCase { + + public ShardSizeTermsIT(Settings dynamicSettings) { + super(dynamicSettings); + } + public void testNoShardSizeString() throws Exception { createIdx("type=keyword"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index 5bf403d19ed9f..e914b87754865 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -31,11 +31,14 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import 
org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; @@ -62,6 +65,7 @@ import org.opensearch.search.aggregations.bucket.terms.heuristic.ScriptHeuristic; import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.search.aggregations.bucket.SharedSignificantTermsTestMethods; import java.io.IOException; @@ -78,6 +82,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.significantTerms; import static org.opensearch.search.aggregations.AggregationBuilders.significantText; @@ -90,12 +95,29 @@ import static org.hamcrest.Matchers.is; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class SignificantTermsSignificanceScoreIT extends OpenSearchIntegTestCase { +public class SignificantTermsSignificanceScoreIT extends ParameterizedOpenSearchIntegTestCase { static final String INDEX_NAME = "testidx"; static final String TEXT_FIELD = "text"; static final String CLASS_FIELD = "class"; + public SignificantTermsSignificanceScoreIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Collection> nodePlugins() { return Arrays.asList(TestScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java index 63385b55f47e8..3fcf4b5d533d4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java @@ -32,23 +32,30 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.BucketOrder; import 
org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.Terms.Bucket; import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -60,7 +67,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class TermsDocCountErrorIT extends OpenSearchIntegTestCase { +public class TermsDocCountErrorIT extends ParameterizedOpenSearchIntegTestCase { private static final String STRING_FIELD_NAME = "s_value"; private static final String LONG_FIELD_NAME = "l_value"; @@ -72,6 +79,23 @@ public static String randomExecutionHint() { private static int numRoutingValues; + public TermsDocCountErrorIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping(STRING_FIELD_NAME, "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java index e7e826d981c84..b0d8e7ea02e8f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java @@ -31,9 +31,12 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.BucketOrder; @@ -41,13 +44,16 @@ import org.opensearch.search.aggregations.bucket.terms.SignificantTerms; import org.opensearch.search.aggregations.bucket.terms.SignificantTermsAggregatorFactory; import org.opensearch.search.aggregations.bucket.terms.Terms; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import 
java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.significantTerms; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -55,9 +61,27 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; -public class TermsShardMinDocCountIT extends OpenSearchIntegTestCase { +public class TermsShardMinDocCountIT extends ParameterizedOpenSearchIntegTestCase { + private static final String index = "someindex"; + public TermsShardMinDocCountIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + private static String randomExecutionHint() { return randomBoolean() ? null : randomFrom(SignificantTermsAggregatorFactory.ExecutionMode.values()).toString(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java index 969cbf272fab0..20caa4fd076fe 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/BaseStringTermsTestCase.java @@ -9,6 +9,7 @@ package org.opensearch.search.aggregations.bucket.terms; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.plugins.Plugin; @@ -37,6 +38,10 @@ public class BaseStringTermsTestCase extends AbstractTermsTestCase { protected static final String MULTI_VALUED_FIELD_NAME = "s_values"; protected static Map<String, List<Terms.Bucket>> expectedMultiSortBuckets; + public BaseStringTermsTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class);
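The BaseStringTermsTestCase change just above shows how shared fixtures participate in the parameterization: abstract base classes only accept the injected Settings and forward them to super, while the concrete leaf suites (StringTermsIT, MultiTermsIT, and so on) declare the @ParametersFactory. A compressed sketch of that layering, where every class name other than the test-framework types is invented for illustration:

    import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

    import org.opensearch.common.settings.Settings;
    import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;

    import java.util.Arrays;
    import java.util.Collection;

    import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;

    // Shared fixture code lives in an abstract middle layer that simply
    // threads the per-run settings up the constructor chain...
    abstract class ExampleBaseTermsTestCase extends ParameterizedOpenSearchIntegTestCase {
        ExampleBaseTermsTestCase(Settings dynamicSettings) {
            super(dynamicSettings);
        }
    }

    // ...and only the concrete suite enumerates the parameter sets, so each of
    // its tests runs once with concurrent segment search off and once with it on.
    public class ExampleTermsIT extends ExampleBaseTermsTestCase {
        public ExampleTermsIT(Settings dynamicSettings) {
            super(dynamicSettings);
        }

        @ParametersFactory
        public static Collection<Object[]> parameters() {
            return Arrays.asList(
                new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
                new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
            );
        }
    }

diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java index 1f1da9627d5ea..8c727d280ec52 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/terms/StringTermsIT.java @@ -79,6 +79,10 @@ @OpenSearchIntegTestCase.SuiteScopeTestCase public class StringTermsIT extends BaseStringTermsTestCase { + public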
StringTermsIT(Settings dynamicSettings) { + super(dynamicSettings); + } + // the main purpose of this test is to make sure we're not allocating 2GB of memory per shard public void testSizeIsZero() { final int minDocCount = randomInt(1); @@ -1127,6 +1131,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } public void testScriptWithValueType() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java index 147f451c14de8..9ebec21367164 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java @@ -32,9 +32,12 @@ package org.opensearch.search.aggregations.metrics; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptPlugin; @@ -45,7 +48,9 @@ import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -55,6 +60,7 @@ import static java.util.Collections.emptyMap; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.cardinality; import static org.opensearch.search.aggregations.AggregationBuilders.global; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -65,7 +71,24 @@ import static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class CardinalityIT extends OpenSearchIntegTestCase { +public class CardinalityIT extends ParameterizedOpenSearchIntegTestCase { + + public CardinalityIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { @@ -615,5 +638,6 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java 
b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java index a3221e128a5dc..2bf5230c67b43 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java @@ -32,23 +32,46 @@ package org.opensearch.search.aggregations.metrics; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.BucketOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import java.util.Arrays; +import java.util.Collection; import java.util.Map; import java.util.stream.IntStream; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.cardinality; import static org.opensearch.search.aggregations.AggregationBuilders.terms; -public class CardinalityWithRequestBreakerIT extends OpenSearchIntegTestCase { +public class CardinalityWithRequestBreakerIT extends ParameterizedOpenSearchIntegTestCase { + + public CardinalityWithRequestBreakerIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } /** * Test that searches using cardinality aggregations returns all request breaker memory.
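Note that CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING is a dynamic cluster setting, which is what allows the parameterized harness to apply each Settings entry to the same suite-scoped cluster; the featureFlagSettings() override is still needed so the nodes start with the concurrent-search feature flag enabled. Outside of the harness, the same switch could presumably be flipped by hand from any integration test, along these lines (a sketch under that assumption, not code from this change):

    // Enable concurrent segment search for the cluster, run the scenario, then
    // clear the setting again so later tests see the default behaviour.
    client().admin()
        .cluster()
        .prepareUpdateSettings()
        .setPersistentSettings(Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true))
        .get();
    // ... exercise the aggregation under test ...
    client().admin()
        .cluster()
        .prepareUpdateSettings()
        .setPersistentSettings(Settings.builder().putNull(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey()))
        .get();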
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ExtendedStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ExtendedStatsIT.java index 2efb49c488d76..3d804b9aa626e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ExtendedStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ExtendedStatsIT.java @@ -70,6 +70,10 @@ public class ExtendedStatsIT extends AbstractNumericTestCase { + public ExtendedStatsIT(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override protected Collection> nodePlugins() { return Collections.singleton(AggregationTestScriptsPlugin.class); @@ -995,6 +999,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java index ffc31b7cdb7c4..78100d1778ecf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/GeoCentroidIT.java @@ -34,6 +34,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.settings.Settings; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.test.OpenSearchIntegTestCase; @@ -54,6 +55,10 @@ public class GeoCentroidIT extends AbstractGeoTestCase { private static final String aggName = "geoCentroid"; + public GeoCentroidIT(Settings dynamicSettings) { + super(dynamicSettings); + } + public void testEmptyAggregation() throws Exception { SearchResponse response = client().prepareSearch(EMPTY_IDX_NAME) .setQuery(matchAllQuery()) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksIT.java index 87e5a73ef630d..7ca5130388eea 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentileRanksIT.java @@ -72,6 +72,10 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase { + public HDRPercentileRanksIT(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override protected Collection> nodePlugins() { return Collections.singleton(AggregationTestScriptsPlugin.class); @@ -716,6 +720,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentilesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentilesIT.java index ad3fd6517d1b1..ec913b3e130f5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/HDRPercentilesIT.java @@ -75,6 +75,10 @@ public class HDRPercentilesIT extends 
AbstractNumericTestCase { + public HDRPercentilesIT(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override protected Collection> nodePlugins() { return Collections.singleton(AggregationTestScriptsPlugin.class); @@ -687,6 +691,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java index 6af65beba6124..b8447d682abae 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationIT.java @@ -91,6 +91,10 @@ public class MedianAbsoluteDeviationIT extends AbstractNumericTestCase { private static double singleValueExactMAD; private static double multiValueExactMAD; + public MedianAbsoluteDeviationIT(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override public void setupSuiteScopeCluster() throws Exception { final Settings settings = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).build(); @@ -643,5 +647,6 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ScriptedMetricIT.java index 5c782c6d085b4..ced2358ac3f78 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -32,11 +32,14 @@ package org.opensearch.search.aggregations.metrics; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -53,12 +56,14 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.junit.Before; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -69,6 +74,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.global; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; 
import static org.opensearch.search.aggregations.AggregationBuilders.scriptedMetric; @@ -87,10 +93,27 @@ @ClusterScope(scope = Scope.SUITE) @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ScriptedMetricIT extends OpenSearchIntegTestCase { +public class ScriptedMetricIT extends ParameterizedOpenSearchIntegTestCase { private static long numDocs; + public ScriptedMetricIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Collection> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); @@ -1378,6 +1401,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } public void testConflictingAggAndScriptParams() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/StatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/StatsIT.java index e02657670b943..f957a74eeb9d0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/StatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/StatsIT.java @@ -66,6 +66,10 @@ import static org.hamcrest.Matchers.sameInstance; public class StatsIT extends AbstractNumericTestCase { + public StatsIT(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override protected Collection> nodePlugins() { return Collections.singleton(AggregationTestScriptsPlugin.class); @@ -384,5 +388,6 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/SumIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/SumIT.java index fe236f04c19e8..382d656448114 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/SumIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/SumIT.java @@ -68,6 +68,10 @@ public class SumIT extends AbstractNumericTestCase { + public SumIT(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override protected Collection> nodePlugins() { return Collections.singleton(MetricAggScriptPlugin.class); @@ -359,6 +363,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } public void testFieldAlias() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksIT.java index ab0cdbaf3047f..941d3a888db29 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentileRanksIT.java @@ -72,6 
+72,10 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase { + public TDigestPercentileRanksIT(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override protected Collection> nodePlugins() { return Collections.singleton(AggregationTestScriptsPlugin.class); @@ -626,6 +630,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesIT.java index 2c05ed0bac44a..6457cf9307fa1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TDigestPercentilesIT.java @@ -74,6 +74,10 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase { + public TDigestPercentilesIT(Settings dynamicSettings) { + super(dynamicSettings); + } + @Override protected Collection> nodePlugins() { return Collections.singleton(AggregationTestScriptsPlugin.class); @@ -597,5 +601,6 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java index 96aeccfc03fb1..10e51079cf389 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/TopHitsIT.java @@ -31,6 +31,8 @@ package org.opensearch.search.aggregations.metrics; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.Explanation; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.util.ArrayUtil; @@ -40,6 +42,7 @@ import org.opensearch.action.search.SearchType; import org.opensearch.common.document.DocumentField; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.MatchAllQueryBuilder; @@ -67,8 +70,10 @@ import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.Iterator; @@ -83,6 +88,7 @@ import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.matchQuery; import static org.opensearch.index.query.QueryBuilders.nestedQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.global; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.max; @@ -105,11 +111,28 @@ import static org.hamcrest.Matchers.sameInstance; @OpenSearchIntegTestCase.SuiteScopeTestCase() -public class TopHitsIT extends 
OpenSearchIntegTestCase { +public class TopHitsIT extends ParameterizedOpenSearchIntegTestCase { private static final String TERMS_AGGS_FIELD = "terms"; private static final String SORT_FIELD = "sort"; + public TopHitsIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Collection> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ValueCountIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ValueCountIT.java index 82e667bccc576..833d1ce3bb4c3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ValueCountIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/ValueCountIT.java @@ -31,8 +31,11 @@ package org.opensearch.search.aggregations.metrics; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.plugins.Plugin; import org.opensearch.script.Script; import org.opensearch.script.ScriptType; @@ -42,7 +45,9 @@ import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -51,6 +56,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.count; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.global; @@ -67,7 +73,25 @@ import static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ValueCountIT extends OpenSearchIntegTestCase { +public class ValueCountIT extends ParameterizedOpenSearchIntegTestCase { + + public ValueCountIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void 
setupSuiteScopeCluster() throws Exception { createIndex("idx"); @@ -363,6 +387,7 @@ public void testScriptCaching() throws Exception { .getMissCount(), equalTo(2L) ); + internalCluster().wipeIndices("cache_test_idx"); } public void testOrderByEmptyAggregation() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java index 6cd16a47e98d2..bec9203384026 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java @@ -32,8 +32,12 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -42,11 +46,15 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -58,7 +66,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class AvgBucketIT extends OpenSearchIntegTestCase { +public class AvgBucketIT extends ParameterizedOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -69,6 +77,23 @@ public class AvgBucketIT extends OpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; + public AvgBucketIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java index 926c708e99bd6..4c3129eb89e3b 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java @@ -32,8 +32,12 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -47,9 +51,11 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -58,6 +64,7 @@ import java.util.function.Function; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.dateRange; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; @@ -69,7 +76,7 @@ import static org.hamcrest.Matchers.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class BucketScriptIT extends OpenSearchIntegTestCase { +public class BucketScriptIT extends ParameterizedOpenSearchIntegTestCase { private static final String FIELD_1_NAME = "field1"; private static final String FIELD_2_NAME = "field2"; @@ -83,6 +90,23 @@ public class BucketScriptIT extends OpenSearchIntegTestCase { private static int maxNumber; private static long date; + public BucketScriptIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Collection> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java index 7b802478a46d8..a7b28add7373a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java @@ -32,8 +32,12 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import 
org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -46,9 +50,11 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -57,6 +63,7 @@ import java.util.function.Function; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.PipelineAggregatorBuilders.bucketSelector; @@ -70,7 +77,7 @@ import static org.hamcrest.Matchers.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class BucketSelectorIT extends OpenSearchIntegTestCase { +public class BucketSelectorIT extends ParameterizedOpenSearchIntegTestCase { private static final String FIELD_1_NAME = "field1"; private static final String FIELD_2_NAME = "field2"; @@ -82,6 +89,23 @@ public class BucketSelectorIT extends OpenSearchIntegTestCase { private static int minNumber; private static int maxNumber; + public BucketSelectorIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Collection> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java index 231aa2e078de6..2e4fd7a412118 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java @@ -32,10 +32,14 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.bucket.histogram.Histogram; @@ -44,15 +48,18 @@ import org.opensearch.search.sort.FieldSortBuilder; import 
org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; @@ -68,7 +75,7 @@ import static org.hamcrest.Matchers.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class BucketSortIT extends OpenSearchIntegTestCase { +public class BucketSortIT extends ParameterizedOpenSearchIntegTestCase { private static final String INDEX = "bucket-sort-it-data-index"; private static final String INDEX_WITH_GAPS = "bucket-sort-it-data-index-with-gaps"; @@ -78,6 +85,23 @@ public class BucketSortIT extends OpenSearchIntegTestCase { private static final String VALUE_1_FIELD = "value_1"; private static final String VALUE_2_FIELD = "value_2"; + public BucketSortIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void setupSuiteScopeCluster() throws Exception { createIndex(INDEX, INDEX_WITH_GAPS); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java index 2c7890fb7b1cb..b05ff7b4329cd 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java @@ -32,10 +32,14 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateFormatters; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalMultiBucketAggregation; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -44,6 +48,7 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.support.AggregationPath; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.hamcrest.Matcher; import org.junit.After; @@ -55,9 +60,11 @@ import java.time.ZonedDateTime; import 
java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.PipelineAggregatorBuilders.derivative; @@ -69,13 +76,30 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class DateDerivativeIT extends OpenSearchIntegTestCase { +public class DateDerivativeIT extends ParameterizedOpenSearchIntegTestCase { // some index names used during these tests private static final String IDX_DST_START = "idx_dst_start"; private static final String IDX_DST_END = "idx_dst_end"; private static final String IDX_DST_KATHMANDU = "idx_dst_kathmandu"; + public DateDerivativeIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + private ZonedDateTime date(int month, int day) { return ZonedDateTime.of(2012, month, day, 0, 0, 0, 0, ZoneOffset.UTC); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java index 5cff68001c8d5..18484c8a60ed7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java @@ -32,10 +32,14 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.aggregations.InternalAggregation; @@ -47,14 +51,18 @@ import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.search.aggregations.support.AggregationPath; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static 
org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.filters; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; @@ -70,7 +78,7 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class DerivativeIT extends OpenSearchIntegTestCase { +public class DerivativeIT extends ParameterizedOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -92,6 +100,23 @@ public class DerivativeIT extends OpenSearchIntegTestCase { private static Double[] firstDerivValueCounts_empty_rnd; private static long numDocsEmptyIdx_rnd; + public DerivativeIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); @@ -680,6 +705,7 @@ public void testAvgMovavgDerivNPE() throws Exception { .get(); assertSearchResponse(response); + internalCluster().wipeIndices("movavg_npe"); } private void checkBucketKeyAndDocCount( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index 85fe794b05fc6..299827e2413d4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -32,10 +32,14 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -45,11 +49,15 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -61,7 +69,7 @@ import static 
org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class ExtendedStatsBucketIT extends OpenSearchIntegTestCase { +public class ExtendedStatsBucketIT extends ParameterizedOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -72,6 +80,23 @@ public class ExtendedStatsBucketIT extends OpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; + public ExtendedStatsBucketIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java index 8cc71f91aae5c..529d7c8bfa2fc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java @@ -32,9 +32,13 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.WriteRequest; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -54,12 +58,16 @@ import org.opensearch.search.aggregations.metrics.SumAggregationBuilder; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; @@ -72,7 +80,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class MaxBucketIT extends OpenSearchIntegTestCase { +public class MaxBucketIT extends ParameterizedOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -83,6 +91,23 @@ public class MaxBucketIT extends OpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; + public 
MaxBucketIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); @@ -519,13 +544,13 @@ public void testNested() throws Exception { /** * https://github.com/elastic/elasticsearch/issues/33514 - * + *
<p>
      * This bug manifests as the max_bucket agg ("peak") being added to the response twice, because * the pipeline agg is run twice. This makes invalid JSON and breaks conversion to maps. * The bug was caused by an UnmappedTerms being chosen as the first reduction target. UnmappedTerms * delegated reduction to the first non-unmapped agg, which would reduce and run pipeline aggs. But then * execution returns to the UnmappedTerms and _it_ runs pipelines as well, doubling up on the values. - * + *
<p>
      * Applies to any pipeline agg, not just max. */ public void testFieldIsntWrittenOutTwice() throws Exception { @@ -587,5 +612,6 @@ public void testFieldIsntWrittenOutTwice() throws Exception { SearchResponse response = client().prepareSearch("foo_*").setSize(0).addAggregation(groupByLicenseAgg).get(); BytesReference bytes = org.opensearch.core.xcontent.XContentHelper.toXContent(response, MediaTypeRegistry.JSON, false); XContentHelper.convertToMap(bytes, false, MediaTypeRegistry.JSON); + internalCluster().wipeIndices("foo_*"); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java index a9f5aa81c9e70..3c8d3e2064014 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java @@ -32,8 +32,12 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -42,11 +46,15 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -58,7 +66,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class MinBucketIT extends OpenSearchIntegTestCase { +public class MinBucketIT extends ParameterizedOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -69,6 +77,23 @@ public class MinBucketIT extends OpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; + public MinBucketIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get());
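The same mechanical conversion recurs across all of these suites: extend ParameterizedOpenSearchIntegTestCase, accept the per-run Settings through the constructor, emit both concurrent-search states from a @ParametersFactory, and force the CONCURRENT_SEGMENT_SEARCH feature flag on so the dynamic cluster setting actually takes effect. Collected in one place, the pattern looks roughly like the sketch below; the class name ExampleIT is hypothetical, everything else mirrors the hunks above. Each test method in such a suite runs once per Object[] returned by the factory.

import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.FeatureFlags;
import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;

import java.util.Arrays;
import java.util.Collection;

import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;

public class ExampleIT extends ParameterizedOpenSearchIntegTestCase {

    // The test framework passes one of the parameter sets below into this constructor.
    public ExampleIT(Settings dynamicSettings) {
        super(dynamicSettings);
    }

    // Runs the whole suite twice: concurrent segment search disabled, then enabled.
    @ParametersFactory
    public static Collection<Object[]> parameters() {
        return Arrays.asList(
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
        );
    }

    // The dynamic setting is only registered while the feature flag is enabled.
    @Override
    protected Settings featureFlagSettings() {
        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
    }
}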
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java index b53183a627ecc..2aad0d2d38901 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.bulk.BulkRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; @@ -40,12 +42,15 @@ import org.opensearch.action.support.WriteRequest; import org.opensearch.client.Client; import org.opensearch.common.collect.EvictingQueue; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.opensearch.search.aggregations.metrics.Avg; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; @@ -57,6 +62,7 @@ import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.max; @@ -71,7 +77,7 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class MovAvgIT extends OpenSearchIntegTestCase { +public class MovAvgIT extends ParameterizedOpenSearchIntegTestCase { private static final String INTERVAL_FIELD = "l_value"; private static final String VALUE_FIELD = "v_value"; private static final String VALUE_FIELD2 = "v_value2"; @@ -127,6 +133,23 @@ public String toString() { } } + public MovAvgIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void setupSuiteScopeCluster() throws Exception { prepareCreate("idx").setMapping( @@ -1145,7 +1168,7 @@ public void testHoltWintersMinimization() { * the default settings. Which means our mock histo will match the generated result (which it won't * if the minimizer is actually working, since the coefficients will be different and thus generate different * data) - * + *
<p>
      * We can simulate this by setting the window size == size of histo */ public void testMinimizeNotEnoughData() { @@ -1341,6 +1364,7 @@ public void testPredictWithNonEmptyBuckets() throws Exception { assertThat(movAvgAgg, nullValue()); } } + internalCluster().wipeIndices("predict_non_empty"); } private void assertValidIterators(Iterator expectedBucketIter, Iterator expectedCountsIter, Iterator expectedValuesIter) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java index 1da079781dc63..580497715ed6d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java @@ -32,10 +32,14 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; @@ -43,14 +47,17 @@ import org.opensearch.search.aggregations.metrics.Percentile; import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Iterator; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -62,7 +69,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class PercentilesBucketIT extends OpenSearchIntegTestCase { +public class PercentilesBucketIT extends ParameterizedOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; private static final double[] PERCENTS = { 0.0, 1.0, 25.0, 50.0, 75.0, 99.0, 100.0 }; @@ -73,6 +80,23 @@ public class PercentilesBucketIT extends OpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; + public PercentilesBucketIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void 
setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java index f5a5d025946ec..b4da63802bc50 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java @@ -32,22 +32,30 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.collect.EvictingQueue; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.max; @@ -61,7 +69,7 @@ import static org.hamcrest.core.IsNull.nullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class SerialDiffIT extends OpenSearchIntegTestCase { +public class SerialDiffIT extends ParameterizedOpenSearchIntegTestCase { private static final String INTERVAL_FIELD = "l_value"; private static final String VALUE_FIELD = "v_value"; @@ -90,6 +98,23 @@ public String toString() { } } + public SerialDiffIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + private ValuesSourceAggregationBuilder> randomMetric(String name, String field) { int rand = randomIntBetween(0, 3); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java index e9f34f6aa65d9..21fdd5e761e77 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java @@ -32,8 
+32,12 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -42,11 +46,15 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -58,10 +66,9 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class StatsBucketIT extends OpenSearchIntegTestCase { +public class StatsBucketIT extends ParameterizedOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; - static int numDocs; static int interval; static int minRandomValue; @@ -69,6 +76,23 @@ public class StatsBucketIT extends OpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; + public StatsBucketIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java index 5bd962017c247..d4bd8f21b2a99 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java @@ -32,8 +32,12 @@ package org.opensearch.search.aggregations.pipeline; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.bucket.histogram.Histogram; import 
org.opensearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -42,11 +46,15 @@ import org.opensearch.search.aggregations.metrics.Sum; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -58,7 +66,7 @@ import static org.hamcrest.core.IsNull.notNullValue; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class SumBucketIT extends OpenSearchIntegTestCase { +public class SumBucketIT extends ParameterizedOpenSearchIntegTestCase { private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; @@ -69,6 +77,23 @@ public class SumBucketIT extends OpenSearchIntegTestCase { static int numValueBuckets; static long[] valueCounts; + public SumBucketIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void setupSuiteScopeCluster() throws Exception { assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java index 8eb5167ca9e51..28ada82a1c56b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java @@ -8,6 +8,8 @@ package org.opensearch.search.backpressure; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.ActionRequest; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; @@ -19,6 +21,7 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.io.stream.StreamInput; @@ -34,6 +37,7 @@ import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import org.hamcrest.MatcherAssert; @@ -42,6 +46,7 @@ import java.io.IOException; import 
java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -50,16 +55,34 @@ import java.util.concurrent.TimeUnit; import java.util.function.Supplier; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class SearchBackpressureIT extends OpenSearchIntegTestCase { +public class SearchBackpressureIT extends ParameterizedOpenSearchIntegTestCase { private static final TimeValue TIMEOUT = new TimeValue(10, TimeUnit.SECONDS); private static final int MOVING_AVERAGE_WINDOW_SIZE = 10; + public SearchBackpressureIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { final List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java index d88893d1bcd71..44c4981dfdb36 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchRedStateIndexIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.basic; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; @@ -40,13 +42,18 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.rest.RestStatus; import org.opensearch.search.SearchService; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.junit.After; +import java.util.Arrays; +import java.util.Collection; import java.util.List; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -54,7 +61,24 @@ import static org.hamcrest.Matchers.lessThan; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class SearchRedStateIndexIT extends OpenSearchIntegTestCase { +public class SearchRedStateIndexIT extends ParameterizedOpenSearchIntegTestCase { + + public SearchRedStateIndexIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> 
parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } public void testAllowPartialsWithRedState() throws Exception { final int numShards = cluster().numDataNodes() + 2; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java index 6099c5342a9d3..71af7215c4eb7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileCreatingIndexIT.java @@ -32,13 +32,21 @@ package org.opensearch.search.basic; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.refresh.RefreshResponse; import org.opensearch.action.search.SearchResponse; import org.opensearch.client.Client; import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -46,7 +54,25 @@ * This test basically verifies that search with a single shard active (cause we indexed to it) and other * shards possibly not active at all (cause they haven't allocated) will still work. */ -public class SearchWhileCreatingIndexIT extends OpenSearchIntegTestCase { +public class SearchWhileCreatingIndexIT extends ParameterizedOpenSearchIntegTestCase { + + public SearchWhileCreatingIndexIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + public void testIndexCausesIndexCreation() throws Exception { searchWhileCreatingIndex(false, 1); // 1 replica in our default... }
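Several of the converted suites also gain explicit cleanup calls at the end of individual tests (the wipeIndices additions for "cache_test_idx", "movavg_npe", "foo_*", and "predict_non_empty" above), presumably so that indices created under one parameter combination do not leak into the next run of the same suite-scoped cluster. A minimal sketch of that teardown idiom inside such a suite, with a hypothetical index name:

public void testSomethingOnAScratchIndex() throws Exception {
    // "scratch_idx" is a hypothetical, test-local index.
    assertAcked(client().admin().indices().prepareCreate("scratch_idx").get());
    // ... index documents, refresh, run the query under test, assert ...
    // Drop the index so the next parameter combination starts from a clean cluster.
    internalCluster().wipeIndices("scratch_idx");
}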
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java index c184d876dcb33..4f0dda9adfa52 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java @@ -32,28 +32,52 @@ package org.opensearch.search.basic; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.SearchHits; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicBoolean; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout; import static org.opensearch.test.hamcrest.OpenSearchAssertions.formatShardStatus; import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class SearchWhileRelocatingIT extends OpenSearchIntegTestCase { +public class SearchWhileRelocatingIT extends ParameterizedOpenSearchIntegTestCase { + + public SearchWhileRelocatingIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } public void testSearchAndRelocateConcurrentlyRandomReplicas() throws Exception { testSearchAndRelocateConcurrently(randomIntBetween(0, 1)); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomExceptionsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomExceptionsIT.java index b6da477d1b23e..aa82b9d21c7fb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomExceptionsIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.basic; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.LeafReader; @@ -47,12 +49,14 @@ import org.opensearch.common.settings.Settings; import 
org.opensearch.common.settings.Settings.Builder; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.index.MockEngineFactoryPlugin; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.engine.MockEngineSupport; import org.opensearch.test.engine.ThrowingLeafReaderWrapper; @@ -64,9 +68,27 @@ import java.util.Random; import java.util.concurrent.ExecutionException; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -public class SearchWithRandomExceptionsIT extends OpenSearchIntegTestCase { +public class SearchWithRandomExceptionsIT extends ParameterizedOpenSearchIntegTestCase { + + public SearchWithRandomExceptionsIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java index 0bcd945ba47b3..446a0bce58d66 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.basic; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.English; import org.opensearch.OpenSearchException; import org.opensearch.action.DocWriteResponse; @@ -43,11 +45,13 @@ import org.opensearch.client.Requests; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.store.MockFSDirectoryFactory; import org.opensearch.test.store.MockFSIndexStore; @@ -56,10 +60,28 @@ import java.util.Collection; import java.util.concurrent.ExecutionException; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; -public class SearchWithRandomIOExceptionsIT extends OpenSearchIntegTestCase { +public class SearchWithRandomIOExceptionsIT extends 
ParameterizedOpenSearchIntegTestCase { + + public SearchWithRandomIOExceptionsIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java index 841821b5bbad6..cbe52abf5279b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportSearchFailuresIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.basic; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.indices.refresh.RefreshResponse; @@ -41,23 +43,46 @@ import org.opensearch.client.Requests; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.client.Requests.clusterHealthRequest; import static org.opensearch.client.Requests.refreshRequest; import static org.opensearch.client.Requests.searchRequest; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class TransportSearchFailuresIT extends OpenSearchIntegTestCase { +public class TransportSearchFailuresIT extends ParameterizedOpenSearchIntegTestCase { + + public TransportSearchFailuresIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected int maximumNumberOfReplicas() { return 1; diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java index 069559d6d11b1..ce5f3f63faa66 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/TransportTwoNodesSearchIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.basic; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchPhaseExecutionException; @@ -39,6 +41,7 @@ import org.opensearch.client.Requests; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.MatchQueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -51,9 +54,11 @@ import org.opensearch.search.aggregations.bucket.global.Global; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Set; import java.util.TreeSet; @@ -66,6 +71,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.builder.SearchSourceBuilder.searchSource; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; @@ -74,7 +80,24 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; -public class TransportTwoNodesSearchIT extends OpenSearchIntegTestCase { +public class TransportTwoNodesSearchIT extends ParameterizedOpenSearchIntegTestCase { + + public TransportTwoNodesSearchIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected int numberOfReplicas() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java index 127bd3176453b..86df25c4dad65 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/FetchSubPhasePluginIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.fetch; +import 
com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Terms; @@ -39,6 +41,8 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.document.DocumentField; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -48,12 +52,13 @@ import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.SearchExtBuilder; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -64,11 +69,30 @@ import static java.util.Collections.singletonList; import static org.opensearch.client.Requests.indexRequest; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.CoreMatchers.equalTo; @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 2) -public class FetchSubPhasePluginIT extends OpenSearchIntegTestCase { +public class FetchSubPhasePluginIT extends ParameterizedOpenSearchIntegTestCase { + + public FetchSubPhasePluginIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Collection> nodePlugins() { return Collections.singletonList(FetchTermVectorsPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java index f43918e28b9b5..9b3e1337418cc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/InnerHitsIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.fetch.subphase; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.util.ArrayUtil; import org.opensearch.action.index.IndexRequestBuilder; @@ -39,6 +41,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import 
org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.query.BoolQueryBuilder; @@ -55,7 +58,7 @@ import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; @@ -72,6 +75,7 @@ import static org.opensearch.index.query.QueryBuilders.matchQuery; import static org.opensearch.index.query.QueryBuilders.nestedQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -84,7 +88,24 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class InnerHitsIT extends OpenSearchIntegTestCase { +public class InnerHitsIT extends ParameterizedOpenSearchIntegTestCase { + + public InnerHitsIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java index e9cad63cbac94..23b5d0cab0697 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/MatchedQueriesIT.java @@ -32,7 +32,11 @@ package org.opensearch.search.fetch.subphase; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentHelper; @@ -41,7 +45,10 @@ import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.SearchHit; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.index.query.QueryBuilders.boolQuery; import static org.opensearch.index.query.QueryBuilders.constantScoreQuery; @@ -52,11 +59,30 @@ import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.index.query.QueryBuilders.termsQuery; import static org.opensearch.index.query.QueryBuilders.wrapperQuery; +import static 
org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItemInArray; -public class MatchedQueriesIT extends OpenSearchIntegTestCase { +public class MatchedQueriesIT extends ParameterizedOpenSearchIntegTestCase { + + public MatchedQueriesIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + public void testSimpleMatchedQueryFromFilteredQuery() throws Exception { createIndex("test"); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java index 7df5b9b88a69c..fe17c3e22d43c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/CustomHighlighterSearchIT.java @@ -31,12 +31,16 @@ package org.opensearch.search.fetch.subphase.highlight; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.junit.Before; import java.io.IOException; @@ -45,6 +49,7 @@ import java.util.HashMap; import java.util.Map; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHighlight; import static org.hamcrest.Matchers.equalTo; @@ -52,7 +57,24 @@ * Integration test for highlighters registered by a plugin. 
*/ @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1) -public class CustomHighlighterSearchIT extends OpenSearchIntegTestCase { +public class CustomHighlighterSearchIT extends ParameterizedOpenSearchIntegTestCase { + + public CustomHighlighterSearchIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 2e70029cfb9f6..c12fbda500673 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -31,6 +31,7 @@ package org.opensearch.search.fetch.subphase.highlight; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.analysis.Analyzer; @@ -48,6 +49,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings.Builder; import org.opensearch.common.time.DateFormatter; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; @@ -73,7 +75,7 @@ import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.MockKeywordPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.hamcrest.Matcher; import org.hamcrest.Matchers; @@ -107,6 +109,7 @@ import static org.opensearch.index.query.QueryBuilders.regexpQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.index.query.QueryBuilders.wildcardQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.builder.SearchSourceBuilder.highlight; import static org.opensearch.search.builder.SearchSourceBuilder.searchSource; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -125,16 +128,34 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; -public class HighlighterSearchIT extends OpenSearchIntegTestCase { +public class HighlighterSearchIT extends ParameterizedOpenSearchIntegTestCase { + // TODO as we move analyzers out of the core we need to move some of these into HighlighterWithAnalyzersTests private static final String[] ALL_TYPES = new String[] { "plain", "fvh", "unified" }; + public HighlighterSearchIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new 
Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Collection> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, MockKeywordPlugin.class, MockAnalysisPlugin.class); } - public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOException { + public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder(); mappings.startObject(); mappings.startObject("properties") @@ -156,6 +177,7 @@ public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOExceptio .setSource(jsonBuilder().startObject().array("tags", "foo baz", "foo baz", "foo baz", "foo bar").field("sort", 2).endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); for (BoundaryScannerType scanner : BoundaryScannerType.values()) { SearchResponse search = client().prepareSearch() @@ -169,12 +191,13 @@ public void testHighlightingWithKeywordIgnoreBoundaryScanner() throws IOExceptio } } - public void testHighlightingWithStoredKeyword() throws IOException { + public void testHighlightingWithStoredKeyword() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder(); mappings.startObject(); mappings.startObject("properties").startObject("text").field("type", "keyword").field("store", true).endObject().endObject(); mappings.endObject(); assertAcked(prepareCreate("test").setMapping(mappings)); + indexRandomForConcurrentSearch("test"); client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "foo").endObject()).get(); refresh(); SearchResponse search = client().prepareSearch() @@ -184,7 +207,7 @@ public void testHighlightingWithStoredKeyword() throws IOException { assertHighlight(search, 0, "text", 0, equalTo("foo")); } - public void testHighlightingWithWildcardName() throws IOException { + public void testHighlightingWithWildcardName() throws IOException, InterruptedException { // test the kibana case with * as fieldname that will try highlight all fields including meta fields XContentBuilder mappings = jsonBuilder(); mappings.startObject(); @@ -200,6 +223,7 @@ public void testHighlightingWithWildcardName() throws IOException { assertAcked(prepareCreate("test").setMapping(mappings)); client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("text", "text").endObject()).get(); refresh(); + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { SearchResponse search = client().prepareSearch() .setQuery(constantScoreQuery(matchQuery("text", "text"))) @@ -209,7 +233,7 @@ public void testHighlightingWithWildcardName() throws IOException { } } - public void testFieldAlias() throws IOException { + public void testFieldAlias() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder().startObject() .startObject("properties") .startObject("text") @@ -227,7 +251,7 @@ public void testFieldAlias() throws IOException { client().prepareIndex("test").setId("1").setSource("text", "foo").get(); refresh(); - + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { HighlightBuilder builder = new 
HighlightBuilder().field(new Field("alias").highlighterType(type)) .requireFieldMatch(randomBoolean()); @@ -236,7 +260,7 @@ public void testFieldAlias() throws IOException { } } - public void testFieldAliasWithSourceLookup() throws IOException { + public void testFieldAliasWithSourceLookup() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder().startObject() .startObject("properties") .startObject("text") @@ -255,7 +279,7 @@ public void testFieldAliasWithSourceLookup() throws IOException { client().prepareIndex("test").setId("1").setSource("text", "foo bar").get(); refresh(); - + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { HighlightBuilder builder = new HighlightBuilder().field(new Field("alias").highlighterType(type)) .requireFieldMatch(randomBoolean()); @@ -264,7 +288,7 @@ public void testFieldAliasWithSourceLookup() throws IOException { } } - public void testFieldAliasWithWildcardField() throws IOException { + public void testFieldAliasWithWildcardField() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder().startObject() .startObject("properties") .startObject("keyword") @@ -280,13 +304,14 @@ public void testFieldAliasWithWildcardField() throws IOException { client().prepareIndex("test").setId("1").setSource("keyword", "foo").get(); refresh(); + indexRandomForConcurrentSearch("test"); HighlightBuilder builder = new HighlightBuilder().field(new Field("al*")).requireFieldMatch(false); SearchResponse search = client().prepareSearch().setQuery(matchQuery("alias", "foo")).highlighter(builder).get(); assertHighlight(search, 0, "alias", 0, equalTo("foo")); } - public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOException { + public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOException, InterruptedException { XContentBuilder mappings = jsonBuilder(); mappings.startObject(); mappings.startObject("_source") @@ -313,6 +338,7 @@ public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOExc .setSource(jsonBuilder().startObject().field("unstored_text", "text").field("text", "text").endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { SearchResponse search = client().prepareSearch() .setQuery(constantScoreQuery(matchQuery("text", "text"))) @@ -329,7 +355,7 @@ public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOExc } // see #3486 - public void testHighTermFrequencyDoc() throws IOException { + public void testHighTermFrequencyDoc() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping("name", "type=text,term_vector=with_positions_offsets,store=" + randomBoolean())); StringBuilder builder = new StringBuilder(); for (int i = 0; i < 6000; i++) { @@ -337,6 +363,7 @@ public void testHighTermFrequencyDoc() throws IOException { } client().prepareIndex("test").setId("1").setSource("name", builder.toString()).get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setQuery(constantScoreQuery(matchQuery("name", "abc"))) .highlighter(new HighlightBuilder().field("name")) @@ -364,6 +391,7 @@ public void testEnsureNoNegativeOffsets() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setQuery(matchQuery("long_term", "thisisaverylongwordandmakessurethisfails foo highlighed")) .highlighter(new 
HighlightBuilder().field("long_term", 18, 1).highlighterType("fvh")) @@ -650,7 +678,7 @@ public void testHighlightIssue1994() throws Exception { assertHighlight(search, 0, "titleTV", 1, 2, equalTo("highlight other text")); } - public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() { + public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() throws InterruptedException { createIndex("test"); ensureGreen(); @@ -663,6 +691,7 @@ public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1 and field2 produces different tags"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "test")) @@ -713,6 +742,7 @@ public void testHighlightingOnWildcardFields() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field*"); SearchSourceBuilder source = searchSource() @@ -762,6 +792,7 @@ public void testForceSourceWithSourceDisabled() throws Exception { .setSource("field1", "The quick brown fox jumps over the lazy dog", "field2", "second field content") .get(); refresh(); + indexRandomForConcurrentSearch("test"); // works using stored field SearchResponse searchResponse = client().prepareSearch("test") @@ -802,6 +833,7 @@ public void testPlainHighlighter() throws Exception { client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "test")) @@ -1004,6 +1036,7 @@ public void testFVHManyMatches() throws Exception { String value = new String(new char[1024 * 256 / pattern.length()]).replace("\0", pattern); client().prepareIndex("test").setSource("field1", value).get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1 with default phrase limit"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "t")) @@ -1095,6 +1128,7 @@ private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception ); index("test", "type1", "3", "foo", "weird", "bar", "result"); refresh(); + indexRandomForConcurrentSearch("test"); Field fooField = new Field("foo").numOfFragments(1) .order("score") @@ -1387,6 +1421,7 @@ public void testMultiMapperVectorWithStore() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse search = client().prepareSearch() @@ -1432,6 +1467,7 @@ public void testMultiMapperVectorFromSource() throws Exception { client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse search = client().prepareSearch() @@ -1477,6 +1513,7 @@ public void testMultiMapperNoVectorWithStore() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse search = client().prepareSearch() @@ -1521,6 
+1558,7 @@ public void testMultiMapperNoVectorFromSource() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse search = client().prepareSearch() @@ -1550,6 +1588,7 @@ public void testFastVectorHighlighterShouldFailIfNoTermVectors() throws Exceptio .setSource("title", "This is a test for the enabling fast vector highlighter"); } indexRandom(true, indexRequestBuilders); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setQuery(matchPhraseQuery("title", "this is a test")) @@ -1587,6 +1626,7 @@ public void testDisableFastVectorHighlighter() throws Exception { .setSource("title", "This is a test for the workaround for the fast vector highlighting SOLR-3724"); } indexRandom(true, indexRequestBuilders); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setQuery(matchPhraseQuery("title", "test for the workaround")) @@ -1648,6 +1688,7 @@ public void testFSHHighlightAllMvFragments() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("tags", "tag")) @@ -1665,11 +1706,12 @@ public void testFSHHighlightAllMvFragments() throws Exception { ); } - public void testBoostingQuery() { + public void testBoostingQuery() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -1681,11 +1723,12 @@ public void testBoostingQuery() { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); } - public void testBoostingQueryTermVector() throws IOException { + public void testBoostingQueryTermVector() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping())); ensureGreen(); client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -1697,12 +1740,13 @@ public void testBoostingQueryTermVector() throws IOException { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); } - public void testCommonTermsQuery() { + public void testCommonTermsQuery() throws InterruptedException { createIndex("test"); ensureGreen(); client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) @@ -1712,12 +1756,13 @@ public void testCommonTermsQuery() { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog")); } - public void testCommonTermsTermVector() throws IOException { + public void 
testCommonTermsTermVector() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1TermVectorMapping())); ensureGreen(); client().prepareIndex("test").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) .highlighter(highlight().field("field2").order("score").preTags("").postTags("")); @@ -1743,6 +1788,7 @@ public void testPlainHighlightDifferentFragmenter() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchPhraseQuery("tags", "long tag")) @@ -1795,12 +1841,13 @@ public void testPlainHighlightDifferentFragmenter() throws Exception { ); } - public void testPlainHighlighterMultipleFields() { + public void testPlainHighlighterMultipleFields() throws InterruptedException { createIndex("test"); ensureGreen(); index("test", "type1", "1", "field1", "The quick brown fox", "field2", "The slow brown fox"); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("field1", "fox")) @@ -1813,7 +1860,7 @@ public void testPlainHighlighterMultipleFields() { assertHighlight(response, 0, "field2", 0, 1, equalTo("The slow brown <2>fox")); } - public void testFastVectorHighlighterMultipleFields() { + public void testFastVectorHighlighterMultipleFields() throws InterruptedException { assertAcked( prepareCreate("test").setMapping( "field1", @@ -1826,6 +1873,7 @@ public void testFastVectorHighlighterMultipleFields() { index("test", "type1", "1", "field1", "The quick brown fox", "field2", "The slow brown fox"); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("field1", "fox")) @@ -1843,6 +1891,7 @@ public void testMissingStoredField() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource(jsonBuilder().startObject().field("field", "highlight").endObject()).get(); refresh(); + indexRandomForConcurrentSearch("test"); // This query used to fail when the field to highlight was absent SearchResponse response = client().prepareSearch("test") @@ -1883,6 +1932,7 @@ public void testNumericHighlighting() throws Exception { .setSource("text", "opensearch test", "byte", 25, "short", 42, "int", 100, "long", -1, "float", 3.2f, "double", 42.42) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("text", "test")) @@ -1905,6 +1955,7 @@ public void testResetTwice() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("text", "opensearch test").get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("text", "test")) @@ -1914,7 +1965,7 @@ public void testResetTwice() throws Exception { assertHitCount(response, 1L); } - public void testHighlightUsesHighlightQuery() throws IOException { + public void testHighlightUsesHighlightQuery() throws IOException, InterruptedException { assertAcked( prepareCreate("test").setMapping( "text", @@ -1925,6 +1976,7 @@ public void 
testHighlightUsesHighlightQuery() throws IOException { index("test", "type1", "1", "text", "Testing the highlight query feature"); refresh(); + indexRandomForConcurrentSearch("test"); for (String type : ALL_TYPES) { HighlightBuilder.Field field = new HighlightBuilder.Field("text"); @@ -1960,7 +2012,11 @@ private static String randomStoreField() { return ""; } - public void testHighlightNoMatchSize() throws IOException { + public void testHighlightNoMatchSize() throws IOException, InterruptedException { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10900", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setMapping( "text", @@ -1972,6 +2028,7 @@ public void testHighlightNoMatchSize() throws IOException { String text = "I am pretty long so some of me should get cut off. Second sentence"; index("test", "type1", "1", "text", text); refresh(); + indexRandomForConcurrentSearch("test"); // When you don't set noMatchSize you don't get any results if there isn't anything to highlight. HighlightBuilder.Field field = new HighlightBuilder.Field("text").fragmentSize(21).numOfFragments(1).highlighterType("plain"); @@ -2070,7 +2127,11 @@ public void testHighlightNoMatchSize() throws IOException { assertNotHighlighted(response, 0, "text"); } - public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { + public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException, InterruptedException { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10900", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setMapping( "text", @@ -2083,6 +2144,7 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { String text2 = "I am short"; index("test", "type1", "1", "text", new String[] { text1, text2 }); refresh(); + indexRandomForConcurrentSearch("test"); // The no match fragment should come from the first value of a multi-valued field HighlightBuilder.Field field = new HighlightBuilder.Field("text").fragmentSize(21) @@ -2165,7 +2227,11 @@ public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException { assertNotHighlighted(response, 0, "text"); } - public void testHighlightNoMatchSizeNumberOfFragments() throws IOException { + public void testHighlightNoMatchSizeNumberOfFragments() throws IOException, InterruptedException { + assumeFalse( + "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10900", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); assertAcked( prepareCreate("test").setMapping( "text", @@ -2179,6 +2245,7 @@ public void testHighlightNoMatchSizeNumberOfFragments() throws IOException { String text3 = "This is the fifth sentence"; index("test", "type1", "1", "text", new String[] { text1, text2, text3 }); refresh(); + indexRandomForConcurrentSearch("test"); // The no match fragment should come from the first value of a multi-valued field HighlightBuilder.Field field = new HighlightBuilder.Field("text").fragmentSize(1) @@ -2222,6 +2289,7 @@ public void testPostingsHighlighter() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy quick dog") .get(); 
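/*
 * Every class touched in this changeset is migrated with the same template: extend
 * ParameterizedOpenSearchIntegTestCase instead of OpenSearchIntegTestCase, accept the
 * per-run Settings in the constructor, declare a @ParametersFactory that runs the whole
 * suite twice (concurrent segment search off, then on), and enable the feature flag so
 * the feature-gated cluster setting is registered at all. A minimal sketch of that
 * template follows; ExampleIT is a hypothetical class name, everything else mirrors the
 * code in this diff:
 *
 *   public class ExampleIT extends ParameterizedOpenSearchIntegTestCase {
 *
 *       public ExampleIT(Settings dynamicSettings) {
 *           super(dynamicSettings); // settings applied to the test cluster for this run
 *       }
 *
 *       @ParametersFactory
 *       public static Collection<Object[]> parameters() {
 *           // one run per entry: concurrent segment search disabled, then enabled
 *           return Arrays.asList(
 *               new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
 *               new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
 *           );
 *       }
 *
 *       @Override
 *       protected Settings featureFlagSettings() {
 *           // the setting above only exists while the feature flag is enabled
 *           return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
 *       }
 *   }
 */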
refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "test")) @@ -2299,6 +2367,7 @@ public void testPostingsHighlighterMultipleFields() throws Exception { "The slow brown fox. Second sentence." ); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("field1", "fox")) @@ -2323,6 +2392,7 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "fox")) @@ -2355,6 +2425,7 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); source = searchSource().query(termQuery("field1", "fox")) .highlighter(highlight().field(new Field("field1").numOfFragments(0).preTags("").postTags(""))); @@ -2391,7 +2462,7 @@ public void testPostingsHighlighterNumberOfFragments() throws Exception { } } - public void testMultiMatchQueryHighlight() throws IOException { + public void testMultiMatchQueryHighlight() throws IOException, InterruptedException { XContentBuilder mapping = XContentFactory.jsonBuilder() .startObject() .startObject("properties") @@ -2413,6 +2484,7 @@ public void testMultiMatchQueryHighlight() throws IOException { .setSource("field1", "The quick brown fox jumps over", "field2", "The quick brown fox jumps over") .get(); refresh(); + indexRandomForConcurrentSearch("test"); final int iters = scaledRandomIntBetween(20, 30); for (int i = 0; i < iters; i++) { String highlighterType = rarely() ? null : RandomPicks.randomFrom(random(), ALL_TYPES); @@ -2458,6 +2530,7 @@ public void testPostingsHighlighterOrderByScore() throws Exception { ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(termQuery("field1", "sentence")) @@ -2544,6 +2617,7 @@ public void testPostingsHighlighterMultiMapperWithStore() throws Exception { ensureGreen(); client().prepareIndex("test").setId("1").setSource("title", "this is a test . Second sentence.").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse searchResponse = client().prepareSearch() @@ -2602,6 +2676,7 @@ public void testPostingsHighlighterMultiMapperFromSource() throws Exception { client().prepareIndex("test").setId("1").setSource("title", "this is a test").get(); refresh(); + indexRandomForConcurrentSearch("test"); // simple search on body with standard analyzer with a simple field query SearchResponse searchResponse = client().prepareSearch() @@ -2651,13 +2726,14 @@ public void testPostingsHighlighterShouldFailIfNoOffsets() throws Exception { assertNoFailures(search); } - public void testPostingsHighlighterBoostingQuery() throws IOException { + public void testPostingsHighlighterBoostingQuery() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); client().prepareIndex("test") .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! 
Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -2668,7 +2744,7 @@ public void testPostingsHighlighterBoostingQuery() throws IOException { assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy dog! Second sentence.")); } - public void testPostingsHighlighterCommonTermsQuery() throws IOException { + public void testPostingsHighlighterCommonTermsQuery() throws IOException, InterruptedException { assertAcked(prepareCreate("test").setMapping(type1PostingsffsetsMapping())); ensureGreen(); @@ -2676,6 +2752,7 @@ public void testPostingsHighlighterCommonTermsQuery() throws IOException { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100)) @@ -2717,6 +2794,7 @@ public void testPostingsHighlighterPrefixQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(prefixQuery("field2", "qui")).highlighter(highlight().field("field2")); @@ -2739,6 +2817,7 @@ public void testPostingsHighlighterFuzzyQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(fuzzyQuery("field2", "quck")).highlighter(highlight().field("field2")); @@ -2762,6 +2841,7 @@ public void testPostingsHighlighterRegexpQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(regexpQuery("field2", "qu[a-l]+k")).highlighter(highlight().field("field2")); @@ -2785,6 +2865,7 @@ public void testPostingsHighlighterWildcardQuery() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(wildcardQuery("field2", "qui*")).highlighter(highlight().field("field2")); @@ -2819,6 +2900,7 @@ public void testPostingsHighlighterTermRangeQuery() throws Exception { client().prepareIndex("test").setSource("field1", "this is a test", "field2", "aaab").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(rangeQuery("field2").gte("aaaa").lt("zzzz")) @@ -2836,6 +2918,7 @@ public void testPostingsHighlighterQueryString() throws Exception { .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! 
Second sentence.") .get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field2"); SearchSourceBuilder source = searchSource().query(queryStringQuery("qui*").defaultField("field2")) @@ -2857,6 +2940,7 @@ public void testPostingsHighlighterRegexpQueryWithinConstantScoreQuery() throws client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(constantScoreQuery(regexpQuery("field1", "pho[a-z]+"))) @@ -2871,6 +2955,7 @@ public void testPostingsHighlighterMultiTermQueryMultipleLevels() throws Excepti client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -2888,6 +2973,7 @@ public void testPostingsHighlighterPrefixQueryWithinBooleanQuery() throws Except client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -2903,6 +2989,7 @@ public void testPostingsHighlighterQueryStringWithinFilteredQuery() throws Excep client().prepareIndex("test").setSource("field1", "The photography word will get highlighted").get(); refresh(); + indexRandomForConcurrentSearch("test"); logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query( @@ -3007,7 +3094,7 @@ public void testFastVectorHighlighterPhraseBoost() throws Exception { * because it doesn't support the concept of terms having a different weight based on position. * @param highlighterType highlighter to test */ - private void phraseBoostTestCase(String highlighterType) { + private void phraseBoostTestCase(String highlighterType) throws InterruptedException { ensureGreen(); StringBuilder text = new StringBuilder(); text.append("words words junk junk junk junk junk junk junk junk highlight junk junk junk junk together junk\n"); @@ -3020,6 +3107,7 @@ private void phraseBoostTestCase(String highlighterType) { } index("test", "type1", "1", "field1", text.toString()); refresh(); + indexRandomForConcurrentSearch("test"); // Match queries phraseBoostTestCaseForClauses( @@ -3088,7 +3176,7 @@ private
<P extends AbstractQueryBuilder<P>
      > void phraseBoostTestCaseForClauses( assertHighlight(response, 0, "field1", 0, 1, highlightedMatcher); } - public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOException { + public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOException, InterruptedException { // check that we do not get an exception for geo_point fields in case someone tries to highlight // it accidentially with a wildcard // see https://github.com/elastic/elasticsearch/issues/17537 @@ -3112,6 +3200,7 @@ public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOExcepti .setSource(jsonBuilder().startObject().field("text", "Arbitrary text field which will should not cause a failure").endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); String highlighterType = randomFrom(ALL_TYPES); QueryBuilder query = QueryBuilders.boolQuery() .should( @@ -3129,7 +3218,7 @@ public void testGeoFieldHighlightingWithDifferentHighlighters() throws IOExcepti assertThat(search.getHits().getAt(0).getHighlightFields().get("text").fragments().length, equalTo(1)); } - public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException { + public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException, InterruptedException { // same as above but in this example the query gets rewritten during highlighting // see https://github.com/elastic/elasticsearch/issues/17537#issuecomment-244939633 XContentBuilder mappings = jsonBuilder(); @@ -3156,6 +3245,7 @@ public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException ) .get(); refresh(); + indexRandomForConcurrentSearch("test"); QueryBuilder query = QueryBuilders.functionScoreQuery( QueryBuilders.boolQuery() @@ -3171,7 +3261,7 @@ public void testGeoFieldHighlightingWhenQueryGetsRewritten() throws IOException assertThat(search.getHits().getTotalHits().value, equalTo(1L)); } - public void testKeywordFieldHighlighting() throws IOException { + public void testKeywordFieldHighlighting() throws IOException, InterruptedException { // check that keyword highlighting works XContentBuilder mappings = jsonBuilder(); mappings.startObject(); @@ -3184,6 +3274,7 @@ public void testKeywordFieldHighlighting() throws IOException { .setSource(jsonBuilder().startObject().field("keyword_field", "some text").endObject()) .get(); refresh(); + indexRandomForConcurrentSearch("test"); SearchResponse search = client().prepareSearch() .setSource( new SearchSourceBuilder().query(QueryBuilders.matchQuery("keyword_field", "some text")) @@ -3217,6 +3308,7 @@ public void testCopyToFields() throws Exception { .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); SearchResponse response = client().prepareSearch() .setQuery(matchQuery("foo_copy", "brown")) .highlighter(new HighlightBuilder().field(new Field("foo_copy"))) @@ -3266,7 +3358,7 @@ public void testACopyFieldWithNestedQuery() throws Exception { ) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); - + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(nestedQuery("foo", matchQuery("foo.text", "brown cow"), ScoreMode.None)) .highlighter(new HighlightBuilder().field(new Field("foo_text").highlighterType("fvh")).requireFieldMatch(false)) @@ -3284,6 +3376,7 @@ public void testFunctionScoreQueryHighlight() throws Exception { .setSource(jsonBuilder().startObject().field("text", "brown").endObject()) 
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch() .setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"))) @@ -3301,6 +3394,7 @@ public void testFiltersFunctionScoreQueryHighlight() throws Exception { .setSource(jsonBuilder().startObject().field("text", "brown").field("enable", "yes").endObject()) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); FunctionScoreQueryBuilder.FilterFunctionBuilder filterBuilder = new FunctionScoreQueryBuilder.FilterFunctionBuilder( QueryBuilders.termQuery("enable", "yes"), new RandomScoreFunctionBuilder() @@ -3395,6 +3489,7 @@ public void testWithNestedQuery() throws Exception { ) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); for (String type : new String[] { "unified", "plain" }) { SearchResponse searchResponse = client().prepareSearch() @@ -3452,6 +3547,7 @@ public void testWithNormalizer() throws Exception { .setSource("keyword", "Hello World") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); for (String highlighterType : new String[] { "unified", "plain" }) { SearchResponse searchResponse = client().prepareSearch() @@ -3474,6 +3570,7 @@ public void testDisableHighlightIdField() throws Exception { .setSource("keyword", "Hello World") .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); + indexRandomForConcurrentSearch("test"); for (String highlighterType : new String[] { "plain", "unified" }) { SearchResponse searchResponse = client().prepareSearch() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java index 21efcd738ae9f..f5d1b8234558e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -32,15 +32,19 @@ package org.opensearch.search.fieldcaps; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.fieldcaps.FieldCapabilities; import org.opensearch.action.fieldcaps.FieldCapabilitiesResponse; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.MapperPlugin; import org.opensearch.plugins.Plugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.junit.Before; import java.util.ArrayList; @@ -52,9 +56,27 @@ import java.util.function.Function; import java.util.function.Predicate; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -public class FieldCapabilitiesIT extends OpenSearchIntegTestCase { +public class FieldCapabilitiesIT extends ParameterizedOpenSearchIntegTestCase { + + public FieldCapabilitiesIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection 
parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Before public void setUp() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java index 65798713bb577..799bbf91a567d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.fields; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; @@ -41,6 +43,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateUtils; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.core.common.bytes.BytesArray; @@ -60,7 +63,7 @@ import org.opensearch.search.lookup.FieldLookup; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; @@ -88,6 +91,7 @@ import static org.opensearch.common.util.set.Sets.newHashSet; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -100,7 +104,24 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class SearchFieldsIT extends OpenSearchIntegTestCase { +public class SearchFieldsIT extends ParameterizedOpenSearchIntegTestCase { + + public SearchFieldsIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java 
b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java index c888dcddb5611..6eb528e0bb7d3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/DecayFunctionScoreIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.functionscore; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.Version; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; @@ -44,6 +46,7 @@ import org.opensearch.common.lucene.search.function.FunctionScoreQuery; import org.opensearch.common.lucene.search.function.FunctionScoreQuery.ScoreMode; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; @@ -53,12 +56,14 @@ import org.opensearch.search.MultiValueMode; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Locale; @@ -72,6 +77,7 @@ import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.exponentialDecayFunction; import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.gaussDecayFunction; import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.linearDecayFunction; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.builder.SearchSourceBuilder.searchSource; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -85,7 +91,24 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; -public class DecayFunctionScoreIT extends OpenSearchIntegTestCase { +public class DecayFunctionScoreIT extends ParameterizedOpenSearchIntegTestCase { + + public DecayFunctionScoreIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected boolean forbidPrivateIndexSettings() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java index e0846e57b9115..62d0d89c644a5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.functionscore; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; @@ -41,6 +43,7 @@ import org.opensearch.common.lucene.search.function.CombineFunction; import org.opensearch.common.lucene.search.function.Functions; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.ScriptPlugin; @@ -53,9 +56,9 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.lookup.SearchLookup; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.IOException; @@ -73,13 +76,31 @@ import static org.opensearch.index.query.QueryBuilders.functionScoreQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.builder.SearchSourceBuilder.searchSource; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1) -public class ExplainableScriptIT extends OpenSearchIntegTestCase { +public class ExplainableScriptIT extends ParameterizedOpenSearchIntegTestCase { + + public ExplainableScriptIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } public static class ExplainableScriptPlugin extends Plugin implements ScriptPlugin { @Override @@ -180,8 +201,18 @@ public void testExplainScript() throws InterruptedException, IOException, Execut for (SearchHit hit : hits.getHits()) { assertThat(hit.getId(), equalTo(Integer.toString(idCounter))); assertThat(hit.getExplanation().toString(), containsString(Double.toString(idCounter))); - assertThat(hit.getExplanation().toString(), containsString("1 = n")); - assertThat(hit.getExplanation().toString(), containsString("1 = N")); + + // Since Apache Lucene 9.8, the scores are not computed because the script (see ExplainableScriptPlugin) + // says "needs_score() == false" + // 19.0 = min of: + // 19.0 = This script returned 19.0 + // 0.0 = _score: + // 0.0 = weight(text:text in 0) [PerFieldSimilarity], result of: + // 0.0 = score(freq=1.0), with freq of: + // 1.0 = freq, occurrences of term within
document + // 3.4028235E38 = maxBoost + + assertThat(hit.getExplanation().toString(), containsString("1.0 = freq, occurrences of term within document")); assertThat(hit.getExplanation().getDetails().length, equalTo(2)); idCounter--; } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java index cf133396e6fcb..b09914c4aa764 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreFieldValueIT.java @@ -32,19 +32,26 @@ package org.opensearch.search.functionscore; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.lucene.search.function.FieldValueFactorFunction; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.SearchHit; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.functionScoreQuery; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.simpleQueryStringQuery; import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertOrderedSearchHits; @@ -54,7 +61,25 @@ /** * Tests for the {@code field_value_factor} function in a function_score query. 
*/ -public class FunctionScoreFieldValueIT extends OpenSearchIntegTestCase { +public class FunctionScoreFieldValueIT extends ParameterizedOpenSearchIntegTestCase { + + public FunctionScoreFieldValueIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + public void testFieldValueFactor() throws IOException { assertAcked( prepareCreate("test").setMapping( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java index 3d24933f66d17..88395f25700d2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScoreIT.java @@ -32,10 +32,14 @@ package org.opensearch.search.functionscore; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.lucene.search.function.CombineFunction; import org.opensearch.common.lucene.search.function.FunctionScoreQuery; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder.FilterFunctionBuilder; @@ -45,11 +49,12 @@ import org.opensearch.script.ScriptType; import org.opensearch.search.SearchHit; import org.opensearch.search.aggregations.bucket.terms.Terms; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -63,6 +68,7 @@ import static org.opensearch.index.query.QueryBuilders.functionScoreQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.search.builder.SearchSourceBuilder.searchSource; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -72,11 +78,28 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -public class FunctionScoreIT extends OpenSearchIntegTestCase { +public class FunctionScoreIT extends ParameterizedOpenSearchIntegTestCase { static final String TYPE = "type"; static final String INDEX = "index"; + public FunctionScoreIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return 
Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Collection> nodePlugins() { return Collections.singleton(CustomScriptPlugin.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java index 384d9f2c61042..1df4acac0dcf0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/FunctionScorePluginIT.java @@ -32,11 +32,15 @@ package org.opensearch.search.functionscore; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.Explanation; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchType; import org.opensearch.common.Priority; import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.query.functionscore.DecayFunction; @@ -46,9 +50,9 @@ import org.opensearch.plugins.Plugin; import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.SearchHits; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.IOException; @@ -62,11 +66,30 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.functionScoreQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.builder.SearchSourceBuilder.searchSource; import static org.hamcrest.Matchers.equalTo; @ClusterScope(scope = Scope.SUITE, supportsDedicatedMasters = false, numDataNodes = 1) -public class FunctionScorePluginIT extends OpenSearchIntegTestCase { +public class FunctionScorePluginIT extends ParameterizedOpenSearchIntegTestCase { + + public FunctionScorePluginIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Collection> nodePlugins() { return Arrays.asList(CustomDistanceScorePlugin.class); diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java index 34a304615b075..de4c85301547c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/QueryRescorerIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.functionscore; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.Explanation; import org.apache.lucene.tests.util.English; import org.opensearch.action.index.IndexRequestBuilder; @@ -41,6 +43,7 @@ import org.opensearch.common.lucene.search.function.CombineFunction; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.Settings.Builder; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.Operator; @@ -52,9 +55,10 @@ import org.opensearch.search.rescore.QueryRescoreMode; import org.opensearch.search.rescore.QueryRescorerBuilder; import org.opensearch.search.sort.SortBuilders; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.Arrays; +import java.util.Collection; import java.util.Comparator; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; @@ -70,6 +74,7 @@ import static org.opensearch.index.query.QueryBuilders.queryStringQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.weightFactorFunction; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFourthHit; @@ -86,7 +91,25 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; -public class QueryRescorerIT extends OpenSearchIntegTestCase { +public class QueryRescorerIT extends ParameterizedOpenSearchIntegTestCase { + + public QueryRescorerIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + public void testEnforceWindowSize() { createIndex("test"); // this diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java index 2176b93079d02..8f43cefd2d53b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/RandomScoreFunctionIT.java @@ 
-31,8 +31,12 @@ package org.opensearch.search.functionscore; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.util.ArrayUtil; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.mapper.SeqNoFieldMapper; import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; @@ -43,7 +47,7 @@ import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.SearchHit; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.hamcrest.CoreMatchers; import java.util.Arrays; @@ -60,6 +64,7 @@ import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.randomFunction; import static org.opensearch.index.query.functionscore.ScoreFunctionBuilders.scriptFunction; import static org.opensearch.script.MockScriptPlugin.NAME; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.allOf; @@ -71,7 +76,24 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.nullValue; -public class RandomScoreFunctionIT extends OpenSearchIntegTestCase { +public class RandomScoreFunctionIT extends ParameterizedOpenSearchIntegTestCase { + + public RandomScoreFunctionIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java index 9c06082db31d4..00524c6e04707 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoFilterIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.geo; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; @@ -54,6 +56,7 @@ import org.opensearch.common.geo.builders.PointBuilder; import org.opensearch.common.geo.builders.PolygonBuilder; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.Streams; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesReference; @@ -61,7 +64,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; -import 
org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import org.junit.BeforeClass; @@ -70,6 +73,8 @@ import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.Random; import java.util.zip.GZIPInputStream; @@ -84,6 +89,7 @@ import static org.opensearch.index.query.QueryBuilders.geoDistanceQuery; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.matchQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasId; @@ -93,7 +99,24 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; -public class GeoFilterIT extends OpenSearchIntegTestCase { +public class GeoFilterIT extends ParameterizedOpenSearchIntegTestCase { + + public GeoFilterIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected boolean forbidPrivateIndexSettings() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoPolygonIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoPolygonIT.java index d0b017732b270..85cb087585d31 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoPolygonIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoPolygonIT.java @@ -32,28 +32,52 @@ package org.opensearch.search.geo; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.Version; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.SearchHit; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.boolQuery; import static org.opensearch.index.query.QueryBuilders.geoPolygonQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.SuiteScopeTestCase -public 
class GeoPolygonIT extends OpenSearchIntegTestCase { +public class GeoPolygonIT extends ParameterizedOpenSearchIntegTestCase { + + public GeoPolygonIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected boolean forbidPrivateIndexSettings() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoShapeIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoShapeIntegrationIT.java index 98e9b35208f3e..1f9b6ae434f75 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoShapeIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/GeoShapeIntegrationIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.geo; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.ClusterState; @@ -39,22 +41,44 @@ import org.opensearch.common.geo.builders.PointBuilder; import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexService; import org.opensearch.index.mapper.GeoShapeFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.indices.IndicesService; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.index.query.QueryBuilders.geoShapeQuery; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class GeoShapeIntegrationIT extends OpenSearchIntegTestCase { +public class GeoShapeIntegrationIT extends ParameterizedOpenSearchIntegTestCase { + + public GeoShapeIntegrationIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Settings nodeSettings(int nodeOrdinal) { diff --git 
a/server/src/internalClusterTest/java/org/opensearch/search/geo/LegacyGeoShapeIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/geo/LegacyGeoShapeIntegrationIT.java index 4ff7f49082901..d21d6036c9673 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/geo/LegacyGeoShapeIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/geo/LegacyGeoShapeIntegrationIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.geo; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.search.SearchResponse; @@ -39,6 +41,7 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContent; @@ -47,17 +50,37 @@ import org.opensearch.index.mapper.LegacyGeoShapeFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.indices.IndicesService; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.index.query.QueryBuilders.geoShapeQuery; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class LegacyGeoShapeIntegrationIT extends OpenSearchIntegTestCase { +public class LegacyGeoShapeIntegrationIT extends ParameterizedOpenSearchIntegTestCase { + + public LegacyGeoShapeIntegrationIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } /** * Test that orientation parameter correctly persists across cluster restart diff --git a/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java b/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java index 188a1f0d0e6d2..4197641e33f3b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.morelikethis; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.RoutingMissingException; import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; @@ -39,6 +41,7 @@ import 
org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.MoreLikeThisQueryBuilder; @@ -47,10 +50,11 @@ import org.opensearch.index.query.QueryBuilders; import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -63,6 +67,7 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.moreLikeThisQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -74,7 +79,24 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; -public class MoreLikeThisIT extends OpenSearchIntegTestCase { +public class MoreLikeThisIT extends ParameterizedOpenSearchIntegTestCase { + + public MoreLikeThisIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java index 9c2ddbba89903..bc1d2833ecbbf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/msearch/MultiSearchIT.java @@ -32,19 +32,44 @@ package org.opensearch.search.msearch; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; 
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasId; import static org.hamcrest.Matchers.equalTo; -public class MultiSearchIT extends OpenSearchIntegTestCase { +public class MultiSearchIT extends ParameterizedOpenSearchIntegTestCase { + + public MultiSearchIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } public void testSimpleMultiSearch() { createIndex("test"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java index 2db4121144bca..83dec7b27a897 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.nested; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.Explanation; import org.apache.lucene.search.join.ScoreMode; import org.opensearch.action.DocWriteResponse; @@ -45,6 +47,7 @@ import org.opensearch.action.search.SearchType; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -53,7 +56,10 @@ import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortMode; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -61,6 +67,7 @@ import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.nestedQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -70,7 +77,25 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; -public class SimpleNestedIT extends OpenSearchIntegTestCase { +public class SimpleNestedIT extends ParameterizedOpenSearchIntegTestCase { + + public SimpleNestedIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { 
Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + public void testSimpleNested() throws Exception { assertAcked(prepareCreate("test").setMapping("nested1", "type=nested")); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java index 799996d4b97dc..8ae652082f653 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java @@ -8,6 +8,7 @@ package org.opensearch.search.pit; +import org.opensearch.ExceptionsHelper; import org.opensearch.action.LatchedActionListener; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; @@ -18,10 +19,14 @@ import org.opensearch.action.search.DeletePitInfo; import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; +import org.opensearch.search.SearchContextMissingException; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; @@ -88,8 +93,8 @@ public void testDeletePit() throws Exception { assertTrue(deletePitInfo.isSuccessful()); } validatePitStats("index", 0, 10); - /** - * Checking deleting the same PIT id again results in succeeded + /* + Checking deleting the same PIT id again results in success */ deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); deletePITResponse = deleteExecute.get(); @@ -108,8 +113,8 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception { pitIds.add(pitResponse.getId()); validatePitStats("index", 5, 0); - /** - * Delete Pit #1 + /* + Delete Pit #1 */ DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); ActionFuture deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); @@ -123,8 +128,8 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception { pitResponse = execute.get(); pitIds.add(pitResponse.getId()); validatePitStats("index", 5, 5); - /** - * Delete PIT with both Ids #1 (which is deleted) and #2 (which is present) + /* + Delete PIT with both Ids #1 (which is deleted) and #2 (which is present) */ deletePITRequest = new DeletePitRequest(pitIds); deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); @@ -160,9 +165,9 @@ public void testDeleteAllPits() throws Exception { validatePitStats("index1", 5, 0); DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); - /** - * When we invoke delete again, returns success after clearing the remaining readers.
Asserting reader context - * not found exceptions don't result in failures ( as deletion in one node is successful ) + /* + When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context + not found exceptions don't result in failures ( as deletion in one node is successful ) */ ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); @@ -202,9 +207,9 @@ public Settings onNodeStopped(String nodeName) throws Exception { }); ensureGreen(); - /** - * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context - * not found exceptions don't result in failures ( as deletion in one node is successful ) + /* + When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context + not found exceptions don't result in failures ( as deletion in one node is successful ) */ ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); @@ -237,9 +242,9 @@ public Settings onNodeStopped(String nodeName) throws Exception { } }); ensureGreen(); - /** - * When we invoke delete again, returns success as all readers are cleared. (Delete all on node which is Up and - * once the node restarts, all active contexts are cleared in the node ) + /* + When we invoke delete again, returns success as all readers are cleared. (Delete all on node which is Up and + once the node restarts, all active contexts are cleared in the node ) */ ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); @@ -263,18 +268,23 @@ public void testDeleteWhileSearch() throws Exception { try { latch.await(); for (int j = 0; j < 30; j++) { - client().prepareSearch() + SearchResponse searchResponse = client().prepareSearch() .setSize(2) .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) .execute() .get(); + if (searchResponse.getFailedShards() != 0) { + verifySearchContextMissingException(searchResponse.getShardFailures()); + } } } catch (Exception e) { - /** - * assert for exception once delete pit goes through. throw error in case of any exeption before that. + /* + assert for exception once delete pit goes through. throw error in case of any exception before that.
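The rewritten comments above describe the intent: once the PIT has been deleted, a racing search may legitimately fail, but only with a missing-context error. Condensed into one sketch, using the exception types this diff imports; pitId stands in for the id captured from the CreatePitResponse, and the surrounding test code is the authoritative version:

    // Sketch: after deletion, tolerate only missing-context shard failures.
    try {
        client().prepareSearch()
            .setSize(2)
            .setPointInTime(new PointInTimeBuilder(pitId).setKeepAlive(TimeValue.timeValueDays(1)))
            .execute()
            .get();
    } catch (Exception e) {
        // the ExecutionException wraps the real failure, so unwrap before asserting
        Throwable t = ExceptionsHelper.unwrapCause(e.getCause());
        assertTrue(e.toString(), t instanceof SearchPhaseExecutionException);
        for (ShardSearchFailure failure : ((SearchPhaseExecutionException) t).shardFailures()) {
            Throwable cause = ExceptionsHelper.unwrapCause(failure.getCause());
            assertTrue(failure.toString(), cause instanceof SearchContextMissingException);
        }
    }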
*/ if (deleted.get() == true) { - if (!e.getMessage().contains("all shards failed")) throw new AssertionError(e); + Throwable t = ExceptionsHelper.unwrapCause(e.getCause()); + assertTrue(e.toString(), t instanceof SearchPhaseExecutionException); + verifySearchContextMissingException(((SearchPhaseExecutionException) t).shardFailures()); return; } throw new AssertionError(e); @@ -283,9 +293,9 @@ public void testDeleteWhileSearch() throws Exception { threads[i].setName("opensearch[node_s_0][search]"); threads[i].start(); } + deleted.set(true); ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); - deleted.set(true); for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { assertTrue(pitIds.contains(deletePitInfo.getPitId())); assertTrue(deletePitInfo.isSuccessful()); @@ -296,6 +306,17 @@ public void testDeleteWhileSearch() throws Exception { } } + private void verifySearchContextMissingException(ShardSearchFailure[] failures) { + for (ShardSearchFailure failure : failures) { + Throwable cause = ExceptionsHelper.unwrapCause(failure.getCause()); + if (failure.toString().contains("reader_context is already closed can't increment refCount current count")) { + // this is fine, expected search error when context is already deleted + } else { + assertTrue(failure.toString(), cause instanceof SearchContextMissingException); + } + } + } + public void testtConcurrentDeletes() throws InterruptedException, ExecutionException { CreatePitResponse pitResponse = createPitOnIndex("index"); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java index 961ec4f184e55..413e5eafe56c7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java @@ -8,6 +8,8 @@ package org.opensearch.search.pit; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.LatchedActionListener; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; @@ -28,10 +30,12 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.junit.After; @@ -39,6 +43,8 @@ import org.junit.Before; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.LinkedList; import java.util.List; @@ -51,6 +57,7 @@ import static org.opensearch.action.search.PitTestsUtil.assertSegments; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; @@ -58,7 +65,23 @@ 
* Multi node integration tests for PIT creation and search operation with PIT ID. */ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 2) -public class PitMultiNodeIT extends OpenSearchIntegTestCase { +public class PitMultiNodeIT extends ParameterizedOpenSearchIntegTestCase { + public PitMultiNodeIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Before public void setupIndex() throws ExecutionException, InterruptedException { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java index 911d2fcae01fe..425764b1c88d2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/preference/SearchPreferenceIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.preference; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.search.SearchRequestBuilder; @@ -42,19 +44,24 @@ import org.opensearch.cluster.routing.OperationRouting; import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.node.Node; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.Set; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -64,7 +71,24 @@ import static org.hamcrest.Matchers.not; @OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class SearchPreferenceIT extends OpenSearchIntegTestCase { +public class SearchPreferenceIT extends ParameterizedOpenSearchIntegTestCase { + + public SearchPreferenceIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); 
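The parameters factory above, together with the featureFlagSettings() override that follows it, is the same three-part conversion this patch applies to every suite: a Settings constructor, a @ParametersFactory yielding one run per concurrent-search mode, and the feature-flag override. Collected into one self-contained sketch (ExampleIT and its test index are illustrative, not part of the PR):

    import java.util.Arrays;
    import java.util.Collection;

    import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

    import org.opensearch.common.settings.Settings;
    import org.opensearch.common.util.FeatureFlags;
    import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;

    import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;

    // Sketch: the test class now runs twice, once per concurrent-search mode.
    public class ExampleIT extends ParameterizedOpenSearchIntegTestCase {

        public ExampleIT(Settings dynamicSettings) {
            super(dynamicSettings); // applied by the base class as dynamic cluster settings
        }

        @ParametersFactory
        public static Collection<Object[]> parameters() {
            return Arrays.asList(
                new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
                new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
            );
        }

        @Override
        protected Settings featureFlagSettings() {
            // the feature flag must be on for the cluster setting to have any effect
            return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
        }
    }

Test bodies in the converted suites additionally call indexRandomForConcurrentSearch("test") after refresh(), as the highlighter changes earlier in this diff do, so the index holds enough segments for the concurrent path to actually be exercised.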
+ } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override public Settings nodeSettings(int nodeOrdinal) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java index b4d7269bab106..82dd6225fda4e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/aggregation/AggregationProfilerIT.java @@ -32,8 +32,12 @@ package org.opensearch.search.profile.aggregation; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.BucketOrder; import org.opensearch.search.aggregations.InternalAggregation; @@ -46,15 +50,19 @@ import org.opensearch.search.profile.query.CollectorResult; import org.opensearch.search.profile.query.QueryProfileShardResult; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.hamcrest.core.IsNull; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.diversifiedSampler; import static org.opensearch.search.aggregations.AggregationBuilders.global; @@ -75,7 +83,8 @@ import static org.hamcrest.Matchers.sameInstance; @OpenSearchIntegTestCase.SuiteScopeTestCase -public class AggregationProfilerIT extends OpenSearchIntegTestCase { +public class AggregationProfilerIT extends ParameterizedOpenSearchIntegTestCase { + private static final String BUILD_LEAF_COLLECTOR = AggregationTimingType.BUILD_LEAF_COLLECTOR.toString(); private static final String COLLECT = AggregationTimingType.COLLECT.toString(); private static final String POST_COLLECTION = AggregationTimingType.POST_COLLECTION.toString(); @@ -157,6 +166,23 @@ public class AggregationProfilerIT extends OpenSearchIntegTestCase { private static final String REASON_SEARCH_TOP_HITS = "search_top_hits"; private static final String REASON_AGGREGATION = "aggregation"; + public AggregationProfilerIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override 
protected int numberOfShards() { return 1; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java index 5f794d2abf878..ef73438114079 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/profile/query/QueryProfilerIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.profile.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.tests.util.English; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.MultiSearchResponse; @@ -40,20 +42,23 @@ import org.opensearch.action.search.SearchType; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; import org.opensearch.search.profile.ProfileResult; import org.opensearch.search.profile.ProfileShardResult; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; @@ -61,8 +66,32 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +public class QueryProfilerIT extends ParameterizedOpenSearchIntegTestCase { + private final boolean concurrentSearchEnabled; + private static final String MAX_PREFIX = "max_"; + private static final String MIN_PREFIX = "min_"; + private static final String AVG_PREFIX = "avg_"; + private static final String TIMING_TYPE_COUNT_SUFFIX = "_count"; + + public QueryProfilerIT(Settings settings, boolean concurrentSearchEnabled) { + super(settings); + this.concurrentSearchEnabled = concurrentSearchEnabled; + } -public class QueryProfilerIT extends OpenSearchIntegTestCase { + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build(), false }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(), true } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } /** * This test simply checks to make sure nothing crashes. 
Test indexes 100-150 documents, @@ -229,6 +258,7 @@ public void testSimpleMatch() throws Exception { assertEquals(result.getLuceneDescription(), "field1:one"); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -271,6 +301,7 @@ public void testBool() throws Exception { assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); assertEquals(result.getProfiledChildren().size(), 2); + assertQueryProfileResult(result); // Check the children List<ProfileResult> children = result.getProfiledChildren(); @@ -282,12 +313,14 @@ public void testBool() throws Exception { assertThat(childProfile.getTime(), greaterThan(0L)); assertNotNull(childProfile.getTimeBreakdown()); assertEquals(childProfile.getProfiledChildren().size(), 0); + assertQueryProfileResult(childProfile); childProfile = children.get(1); assertEquals(childProfile.getQueryName(), "TermQuery"); assertEquals(childProfile.getLuceneDescription(), "field1:two"); assertThat(childProfile.getTime(), greaterThan(0L)); assertNotNull(childProfile.getTimeBreakdown()); + assertQueryProfileResult(childProfile); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -330,6 +363,7 @@ public void testEmptyBool() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -375,6 +409,7 @@ public void testCollapsingBool() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -415,6 +450,90 @@ public void testBoosting() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), is(not(emptyOrNullString()))); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + public void testSearchLeafForItsLeavesAndRewriteQuery() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = 122; + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", English.intToEnglish(i), "field2", i); + } + + List<String> terms = Arrays.asList("zero", "zero", "one"); + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.boostingQuery( + QueryBuilders.idsQuery().addIds(String.valueOf(randomInt()), String.valueOf(randomInt())), + QueryBuilders.termsQuery("field1", terms) + ).boost(randomFloat()).negativeBoost(randomFloat()); + logger.info("Query: {}", q); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setTrackTotalHits(true) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .get(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); + + for (Map.Entry<String, ProfileShardResult> shardResult : resp.getProfileResults().entrySet()) {
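+ // Network time is reported per shard and must be non-negative; the slice-level max/min/avg assertions below apply only when concurrent segment search is enabled.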
+ assertThat(shardResult.getValue().getNetworkTime().getInboundNetworkTime(), greaterThanOrEqualTo(0L)); + assertThat(shardResult.getValue().getNetworkTime().getOutboundNetworkTime(), greaterThanOrEqualTo(0L)); + for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { + List<ProfileResult> results = searchProfiles.getQueryResults(); + for (ProfileResult result : results) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + Map<String, Long> breakdown = result.getTimeBreakdown(); + Long maxSliceTime = result.getMaxSliceTime(); + Long minSliceTime = result.getMinSliceTime(); + Long avgSliceTime = result.getAvgSliceTime(); + if (concurrentSearchEnabled && results.get(0).equals(result)) { + assertNotNull(maxSliceTime); + assertNotNull(minSliceTime); + assertNotNull(avgSliceTime); + assertThat(breakdown.size(), equalTo(66)); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + if (queryTimingType != QueryTimingType.CREATE_WEIGHT) { + String maxTimingType = MAX_PREFIX + queryTimingType; + String minTimingType = MIN_PREFIX + queryTimingType; + String avgTimingType = AVG_PREFIX + queryTimingType; + assertNotNull(breakdown.get(maxTimingType)); + assertNotNull(breakdown.get(minTimingType)); + assertNotNull(breakdown.get(avgTimingType)); + assertNotNull(breakdown.get(maxTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(minTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(avgTimingType + TIMING_TYPE_COUNT_SUFFIX)); + } + } + } else if (concurrentSearchEnabled) { + assertThat(maxSliceTime, equalTo(0L)); + assertThat(minSliceTime, equalTo(0L)); + assertThat(avgSliceTime, equalTo(0L)); + assertThat(breakdown.size(), equalTo(27)); + } else { + assertThat(maxSliceTime, is(nullValue())); + assertThat(minSliceTime, is(nullValue())); + assertThat(avgSliceTime, is(nullValue())); + assertThat(breakdown.size(), equalTo(27)); + } } CollectorResult result = searchProfiles.getCollectorResult(); @@ -455,6 +574,7 @@ public void testDisMaxRange() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -494,6 +614,7 @@ public void testRange() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -547,6 +668,7 @@ public void testPhrase() throws Exception { assertNotNull(result.getLuceneDescription()); assertThat(result.getTime(), greaterThan(0L)); assertNotNull(result.getTimeBreakdown()); + assertQueryProfileResult(result); } CollectorResult result = searchProfiles.getCollectorResult(); @@ -579,4 +701,35 @@ public void testNoProfile() throws Exception { assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0)); } + private void assertQueryProfileResult(ProfileResult result) { + Map<String, Long> breakdown = result.getTimeBreakdown(); + Long maxSliceTime = result.getMaxSliceTime(); + Long minSliceTime = result.getMinSliceTime(); + Long avgSliceTime = result.getAvgSliceTime(); + if (concurrentSearchEnabled) { + assertNotNull(maxSliceTime); + assertNotNull(minSliceTime); + assertNotNull(avgSliceTime); + assertThat(breakdown.size(), equalTo(66));
+ for (QueryTimingType queryTimingType : QueryTimingType.values()) { + if (queryTimingType != QueryTimingType.CREATE_WEIGHT) { + String maxTimingType = MAX_PREFIX + queryTimingType; + String minTimingType = MIN_PREFIX + queryTimingType; + String avgTimingType = AVG_PREFIX + queryTimingType; + assertNotNull(breakdown.get(maxTimingType)); + assertNotNull(breakdown.get(minTimingType)); + assertNotNull(breakdown.get(avgTimingType)); + assertNotNull(breakdown.get(maxTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(minTimingType + TIMING_TYPE_COUNT_SUFFIX)); + assertNotNull(breakdown.get(avgTimingType + TIMING_TYPE_COUNT_SUFFIX)); + } + } + } else { + assertThat(maxSliceTime, is(nullValue())); + assertThat(minSliceTime, is(nullValue())); + assertThat(avgSliceTime, is(nullValue())); + assertThat(breakdown.size(), equalTo(27)); + } + } + } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java index 0ca1780410e13..e3253ea583ac2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/ExistsIT.java @@ -32,17 +32,23 @@ package org.opensearch.search.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.explain.ExplainResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; @@ -51,11 +57,29 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; -public class ExistsIT extends OpenSearchIntegTestCase { +public class ExistsIT extends ParameterizedOpenSearchIntegTestCase { + + public ExistsIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } // TODO: move this to a unit test somewhere...
public void testEmptyIndex() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java index 0c5fa4369da22..457114bac33b8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java @@ -31,6 +31,7 @@ package org.opensearch.search.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; @@ -38,6 +39,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; @@ -52,11 +54,12 @@ import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.MockKeywordPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.junit.Before; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -73,6 +76,7 @@ import static org.opensearch.index.query.QueryBuilders.matchQuery; import static org.opensearch.index.query.QueryBuilders.multiMatchQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -88,7 +92,24 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThan; -public class MultiMatchQueryIT extends OpenSearchIntegTestCase { +public class MultiMatchQueryIT extends ParameterizedOpenSearchIntegTestCase { + + public MultiMatchQueryIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java index 53a41af46790b..099eb934f4f4d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java @@ -32,10 +32,13 @@ package org.opensearch.search.query; +import 
com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperService; @@ -44,18 +47,21 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.SearchModule; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.junit.Before; import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Set; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.queryStringQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.StreamsUtils.copyToStringFromClasspath; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -64,10 +70,27 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -public class QueryStringIT extends OpenSearchIntegTestCase { +public class QueryStringIT extends ParameterizedOpenSearchIntegTestCase { private static int CLUSTER_MAX_CLAUSE_COUNT; + public QueryStringIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @BeforeClass public static void createRandomClusterSetting() { CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(50, 100); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java index d736365a6e236..7ba582811bbc2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/ScriptScoreQueryIT.java @@ -32,10 +32,13 @@ package org.opensearch.search.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.RangeQueryBuilder; @@ -43,8 +46,9 @@ import org.opensearch.script.MockScriptPlugin; import org.opensearch.script.Script; import 
org.opensearch.script.ScriptType; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -54,6 +58,7 @@ import static org.opensearch.index.query.QueryBuilders.boolQuery; import static org.opensearch.index.query.QueryBuilders.matchQuery; import static org.opensearch.index.query.QueryBuilders.scriptScoreQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -62,7 +67,24 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertThirdHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasScore; -public class ScriptScoreQueryIT extends OpenSearchIntegTestCase { +public class ScriptScoreQueryIT extends ParameterizedOpenSearchIntegTestCase { + + public ScriptScoreQueryIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index 1c3a58817e48a..53bded1fc493c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.analysis.pattern.PatternReplaceCharFilter; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.MultiTermQuery; @@ -49,6 +51,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.Fuzziness; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -77,7 +80,7 @@ import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.AggregationBuilders; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.junit.annotations.TestIssueLogging; import java.io.IOException; @@ -126,6 +129,7 @@ import static org.opensearch.index.query.QueryBuilders.termsQuery; import static org.opensearch.index.query.QueryBuilders.wildcardQuery; import static org.opensearch.index.query.QueryBuilders.wrapperQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static 
org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; @@ -143,7 +147,24 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -public class SearchQueryIT extends OpenSearchIntegTestCase { +public class SearchQueryIT extends ParameterizedOpenSearchIntegTestCase { + + public SearchQueryIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java index bccbce3b29b8e..384d2b7423e66 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.query; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; @@ -41,6 +43,7 @@ import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -57,11 +60,12 @@ import org.opensearch.search.SearchHits; import org.opensearch.search.SearchModule; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -75,6 +79,7 @@ import static org.opensearch.index.query.QueryBuilders.queryStringQuery; import static org.opensearch.index.query.QueryBuilders.simpleQueryStringQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.StreamsUtils.copyToStringFromClasspath; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; @@ -90,10 +95,27 @@ /** * Tests for the {@code simple_query_string} query */ -public class SimpleQueryStringIT extends OpenSearchIntegTestCase { +public class SimpleQueryStringIT extends ParameterizedOpenSearchIntegTestCase { private static int CLUSTER_MAX_CLAUSE_COUNT; + public 
SimpleQueryStringIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @BeforeClass public static void createRandomClusterSetting() { CLUSTER_MAX_CLAUSE_COUNT = randomIntBetween(60, 100); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java index e081be0af51a2..34967528f2c4f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scriptfilter/ScriptQuerySearchIT.java @@ -32,10 +32,13 @@ package org.opensearch.search.scriptfilter; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexModule; @@ -47,6 +50,7 @@ import org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalSettingsPlugin; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.Arrays; @@ -61,12 +65,29 @@ import static java.util.Collections.emptyMap; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.scriptQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE) -public class ScriptQuerySearchIT extends OpenSearchIntegTestCase { +public class ScriptQuerySearchIT extends ParameterizedOpenSearchIntegTestCase { + public ScriptQuerySearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java index 
e0a54e9b4fc36..c7a6d18f881c6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/DuelScrollIT.java @@ -32,6 +32,7 @@ package org.opensearch.search.scroll; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.opensearch.action.index.IndexRequestBuilder; @@ -39,24 +40,44 @@ import org.opensearch.action.search.SearchType; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.sort.SortBuilder; import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.Set; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; -public class DuelScrollIT extends OpenSearchIntegTestCase { +public class DuelScrollIT extends ParameterizedOpenSearchIntegTestCase { + public DuelScrollIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + public void testDuelQueryThenFetch() throws Exception { TestContext context = create(SearchType.DFS_QUERY_THEN_FETCH, SearchType.QUERY_THEN_FETCH); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java index aec6a03d3e57f..0eee136acac69 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.scroll; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.ExceptionsHelper; import org.opensearch.action.search.ClearScrollResponse; import org.opensearch.action.search.SearchPhaseExecutionException; @@ -43,6 +45,7 @@ import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.bytes.BytesReference; @@ -57,11 +60,13 @@ import org.opensearch.search.sort.FieldSortBuilder; import 
org.opensearch.search.sort.SortOrder; import org.opensearch.test.InternalTestCluster; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import org.junit.After; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Map; @@ -70,6 +75,7 @@ import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.queryStringQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; @@ -86,7 +92,24 @@ /** * Tests for scrolling. */ -public class SearchScrollIT extends OpenSearchIntegTestCase { +public class SearchScrollIT extends ParameterizedOpenSearchIntegTestCase { + public SearchScrollIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @After public void cleanup() throws Exception { assertAcked( diff --git a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java index c6519cc3a0cb3..27002b844da1d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/scroll/SearchScrollWithFailingNodesIT.java @@ -32,26 +32,50 @@ package org.opensearch.search.scroll; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThan; 
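+// ClusterScope provisions the two data nodes up front; stopping a random data node mid-test leaves shards unassigned, so the scroll must tolerate failed shards.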
-@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) -public class SearchScrollWithFailingNodesIT extends OpenSearchIntegTestCase { +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 2, numClientNodes = 0) +public class SearchScrollWithFailingNodesIT extends ParameterizedOpenSearchIntegTestCase { + public SearchScrollWithFailingNodesIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected int numberOfShards() { return 2; @@ -63,8 +87,6 @@ protected int numberOfReplicas() { } public void testScanScrollWithShardExceptions() throws Exception { - internalCluster().startNode(); - internalCluster().startNode(); assertAcked( prepareCreate("test") // Enforces that only one shard can only be allocated to a single node @@ -97,7 +119,7 @@ public void testScanScrollWithShardExceptions() throws Exception { assertThat(numHits, equalTo(100L)); clearScroll("_all"); - internalCluster().stopRandomNonClusterManagerNode(); + internalCluster().stopRandomDataNode(); searchResponse = client().prepareSearch().setQuery(matchAllQuery()).setSize(10).setScroll(TimeValue.timeValueMinutes(1)).get(); assertThat(searchResponse.getSuccessfulShards(), lessThan(searchResponse.getTotalShards())); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java index 22c0a9cbbab17..b99f66850e9e3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.searchafter; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.CreatePitAction; @@ -43,30 +45,51 @@ import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.UUIDs; import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.SearchHit; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.hamcrest.Matchers; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static 
org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -public class SearchAfterIT extends OpenSearchIntegTestCase { +public class SearchAfterIT extends ParameterizedOpenSearchIntegTestCase { private static final String INDEX_NAME = "test"; private static final int NUM_DOCS = 100; + public SearchAfterIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + public void testsShouldFail() throws Exception { assertAcked(client().admin().indices().prepareCreate("test").setMapping("field1", "type=long", "field2", "type=keyword").get()); ensureGreen(); @@ -197,8 +220,8 @@ public void testPitWithSearchAfter() throws Exception { .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) .get(); assertEquals(3, sr.getHits().getHits().length); - /** - * Add new data and assert PIT results remain the same and normal search results gets refreshed + /* + Add new data and assert PIT results remain the same and normal search results get refreshed */ indexRandom(true, client().prepareIndex("test").setId("4").setSource("field1", 102)); sr = client().prepareSearch() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java index 0e6073ad11689..7aae41d939cac 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/simple/SimpleSearchIT.java @@ -32,12 +32,15 @@ package org.opensearch.search.simple; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.WriteRequest.RefreshPolicy; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.rest.RestStatus; @@ -49,9 +52,11 @@ import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.rescore.QueryRescorerBuilder; import org.opensearch.search.sort.SortOrder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.concurrent.ExecutionException; @@ -62,14 +67,34 @@ import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.queryStringQuery; import static org.opensearch.index.query.QueryBuilders.rangeQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.apache.lucene.search.TotalHits.Relation.EQUAL_TO; +import static org.apache.lucene.search.TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; + +public class SimpleSearchIT extends ParameterizedOpenSearchIntegTestCase { -public class SimpleSearchIT extends OpenSearchIntegTestCase { + public SimpleSearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } public void testSearchNullIndex() { expectThrows( @@ -258,10 +283,9 @@ public void testSimpleDateRange() throws Exception { assertHitCount(searchResponse, 2L); } - public void testSimpleTerminateAfterCount() throws Exception { + public void doTestSimpleTerminateAfterCountWithSize(int size, int max) throws Exception { prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); ensureGreen(); - int max = randomIntBetween(3, 29); List<IndexRequestBuilder> docbuilders = new ArrayList<>(max); for (int i = 1; i <= max; i++) { @@ -278,9 +302,12 @@ public void testSimpleTerminateAfterCount() throws Exception { searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(max)) .setTerminateAfter(i) + .setSize(size) + .setTrackTotalHits(true) .get(); assertHitCount(searchResponse, i); assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(Math.min(i, size), searchResponse.getHits().getHits().length); } searchResponse = client().prepareSearch("test") @@ -292,6 +319,101 @@ public void testSimpleTerminateAfterCount() throws Exception { assertFalse(searchResponse.isTerminatedEarly()); } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10435") + public void testSimpleTerminateAfterCountSize0() throws Exception { + int max = randomIntBetween(3, 29); + doTestSimpleTerminateAfterCountWithSize(0, max); + } + + public void testSimpleTerminateAfterCountRandomSize() throws Exception { + int max = randomIntBetween(3, 29); + doTestSimpleTerminateAfterCountWithSize(randomIntBetween(1, max), max); + } + + public void doTestSimpleTerminateAfterTrackTotalHitsUpTo(int size) throws Exception { + prepareCreate("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)).get(); + ensureGreen(); + int numDocs = 29; + List<IndexRequestBuilder> docbuilders = new ArrayList<>(numDocs); + + for (int i = 1; i <= numDocs; i++) { + String id = String.valueOf(i); + docbuilders.add(client().prepareIndex("test").setId(id).setSource("field", i)); + } + + indexRandom(true, docbuilders); + ensureGreen(); + refresh(); + + SearchResponse searchResponse;
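+ // Each search below asserts the capped total-hits value together with its relation: EQUAL_TO when the count is exact, GREATER_THAN_OR_EQUAL_TO when counting stopped at the trackTotalHitsUpTo bound.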
+ searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(10) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(GREATER_THAN_OR_EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHitsUpTo(10) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(5) + .setSize(size) + .setTrackTotalHits(true) + .get(); + assertTrue(searchResponse.isTerminatedEarly()); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setTerminateAfter(numDocs * 2) + .setSize(size) + .setTrackTotalHits(true) + .get(); + assertFalse(searchResponse.isTerminatedEarly()); + assertEquals(numDocs, searchResponse.getHits().getTotalHits().value); + assertEquals(EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("field").gte(1).lte(numDocs)) + .setSize(size) + .setTrackTotalHitsUpTo(5) + .get(); + assertEquals(5, searchResponse.getHits().getTotalHits().value); + assertEquals(GREATER_THAN_OR_EQUAL_TO, searchResponse.getHits().getTotalHits().relation); + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10435") + public void testSimpleTerminateAfterTrackTotalHitsUpToSize0() throws Exception { + doTestSimpleTerminateAfterTrackTotalHitsUpTo(0); + } + + public void testSimpleTerminateAfterTrackTotalHitsUpToRandomSize() throws Exception { + doTestSimpleTerminateAfterTrackTotalHitsUpTo(randomIntBetween(1, 29)); + } + public void testSimpleIndexSortEarlyTerminate() throws Exception { prepareCreate("test").setSettings( Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).put("index.sort.field", "rank") diff --git a/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java index d50f750a2b2ec..27a56f9d14f08 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.slice; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import
org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.CreatePitAction; @@ -43,6 +45,7 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.search.Scroll; @@ -50,21 +53,41 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.sort.SortBuilders; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.concurrent.ExecutionException; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.startsWith; -public class SearchSliceIT extends OpenSearchIntegTestCase { +public class SearchSliceIT extends ParameterizedOpenSearchIntegTestCase { + public SearchSliceIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + private void setupIndex(int numDocs, int numberOfShards) throws IOException, ExecutionException, InterruptedException { String mapping = XContentFactory.jsonBuilder() .startObject() diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java index 6681715981c54..6886f8d67589e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceIT.java @@ -32,24 +32,30 @@ package org.opensearch.search.sort; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.Version; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.geo.GeoDistance; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.geometry.utils.Geohash; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; +import java.util.Arrays; +import 
java.util.Collection; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFirstHit; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -60,7 +66,24 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -public class GeoDistanceIT extends OpenSearchIntegTestCase { +public class GeoDistanceIT extends ParameterizedOpenSearchIntegTestCase { + + public GeoDistanceIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected boolean forbidPrivateIndexSettings() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java index 5a0ca1d13633e..1b8bd9694483d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/GeoDistanceSortBuilderIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.sort; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.Version; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.metadata.IndexMetadata; @@ -39,28 +41,47 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.DistanceUnit; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.GeoValidationMethod; import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.concurrent.ExecutionException; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.sort.SortBuilders.fieldSort; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertOrderedSearchHits; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSortValues; import static org.hamcrest.Matchers.closeTo; -public class GeoDistanceSortBuilderIT extends 
OpenSearchIntegTestCase { +public class GeoDistanceSortBuilderIT extends ParameterizedOpenSearchIntegTestCase { + public GeoDistanceSortBuilderIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } private static final String LOCATION_FIELD = "location"; @@ -70,16 +91,16 @@ protected boolean forbidPrivateIndexSettings() { } public void testManyToManyGeoPoints() throws ExecutionException, InterruptedException, IOException { - /** - * | q | d1 | d2 - * | | | - * | | | - * | | | - * |2 o| x | x - * | | | - * |1 o| x | x - * |___________________________ - * 1 2 3 4 5 6 7 + /* + | q | d1 | d2 + | | | + | | | + | | | + |2 o| x | x + | | | + |1 o| x | x + |___________________________ + 1 2 3 4 5 6 7 */ Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); @@ -166,11 +187,10 @@ public void testManyToManyGeoPoints() throws ExecutionException, InterruptedExce } public void testSingeToManyAvgMedian() throws ExecutionException, InterruptedException, IOException { - /** - * q = (0, 0) - * - * d1 = (0, 1), (0, 4), (0, 10); so avg. distance is 5, median distance is 4 - * d2 = (0, 1), (0, 5), (0, 6); so avg. distance is 4, median distance is 5 + /* + q = (0, 0) + d1 = (0, 1), (0, 4), (0, 10); so avg. distance is 5, median distance is 4 + d2 = (0, 1), (0, 5), (0, 6); so avg. distance is 4, median distance is 5 */ Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); @@ -235,16 +255,17 @@ protected void createShuffeldJSONArray(XContentBuilder builder, GeoPoint[] point } public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionException, InterruptedException, IOException { - /** q d1 d2 - * |4 o| x | x - * | | | - * |3 o| x | x - * | | | - * |2 o| x | x - * | | | - * |1 o|x |x - * |______________________ - * 1 2 3 4 5 6 + /* + q d1 d2 + |4 o| x | x + | | | + |3 o| x | x + | | | + |2 o| x | x + | | | + |1 o|x |x + |______________________ + 1 2 3 4 5 6 */ Version version = randomBoolean() ? 
Version.CURRENT : VersionUtils.randomIndexCompatibleVersion(random()); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java index 5b896f9a1fe57..7bcded86fcaa8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/SortFromPluginIT.java @@ -8,21 +8,42 @@ package org.opensearch.search.sort; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.sort.plugin.CustomSortBuilder; import org.opensearch.search.sort.plugin.CustomSortPlugin; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.Arrays; import java.util.Collection; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.equalTo; -public class SortFromPluginIT extends OpenSearchIntegTestCase { +public class SortFromPluginIT extends ParameterizedOpenSearchIntegTestCase { + public SortFromPluginIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java index 4f6dd89285bee..c98a38ea0bb97 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/source/MetadataFetchingIT.java @@ -31,26 +31,51 @@ package org.opensearch.search.source; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.apache.lucene.search.join.ScoreMode; import org.opensearch.ExceptionsHelper; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.InnerHitBuilder; import org.opensearch.index.query.NestedQueryBuilder; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.SearchException; import org.opensearch.search.SearchHits; import org.opensearch.search.fetch.subphase.FetchSourceContext; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
+import java.util.Arrays; +import java.util.Collection; import java.util.Collections; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class MetadataFetchingIT extends OpenSearchIntegTestCase { +public class MetadataFetchingIT extends ParameterizedOpenSearchIntegTestCase { + + public MetadataFetchingIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + public void testSimple() { assertAcked(prepareCreate("test")); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java index 11223d11ff30d..eeef5403fe898 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/source/SourceFetchingIT.java @@ -32,14 +32,40 @@ package org.opensearch.search.source; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsEqual.equalTo; -public class SourceFetchingIT extends OpenSearchIntegTestCase { +public class SourceFetchingIT extends ParameterizedOpenSearchIntegTestCase { + + public SourceFetchingIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + public void testSourceDefaultBehavior() { createIndex("test"); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/stats/ConcurrentSearchStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/stats/ConcurrentSearchStatsIT.java new file mode 100644 index 0000000000000..7f819450896b1 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/search/stats/ConcurrentSearchStatsIT.java @@ -0,0 +1,364 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.stats; + +import org.opensearch.action.admin.cluster.node.stats.NodesStatsAction; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.indices.stats.IndexStats; +import org.opensearch.action.admin.indices.stats.IndicesStatsRequestBuilder; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexSettings; +import org.opensearch.indices.IndicesQueryCache; +import org.opensearch.indices.IndicesService; +import org.opensearch.plugins.Plugin; +import org.opensearch.script.MockScriptPlugin; +import org.opensearch.script.Script; +import org.opensearch.script.ScriptType; +import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.threadpool.ThreadPoolStats; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Locale; +import java.util.Map; +import java.util.function.Function; + +import static org.opensearch.index.query.QueryBuilders.scriptQuery; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThan; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 1, numClientNodes = 0, supportsDedicatedMasters = false) +public class ConcurrentSearchStatsIT extends OpenSearchIntegTestCase { + + private final int SEGMENT_SLICE_COUNT = 4; + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(ScriptedDelayedPlugin.class, InternalSettingsPlugin.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + // Filter/Query cache is cleaned periodically, default is 60s, so make sure it runs often.
Thread.sleep for 60s is bad + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING.getKey(), "1ms") + .put(IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.getKey(), true) + .put(CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, SEGMENT_SLICE_COUNT) + .build(); + } + + @Override + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), false) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 0) + .build(); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + + public void testConcurrentQueryCount() throws Exception { + String INDEX_1 = "test-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + String INDEX_2 = "test-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + int NUM_SHARDS = randomIntBetween(1, 5); + createIndex( + INDEX_1, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, NUM_SHARDS) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + createIndex( + INDEX_2, + Settings.builder() + .put(indexSettings()) + .put("search.concurrent_segment_search.enabled", false) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, NUM_SHARDS) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + + ensureGreen(); + + indexRandom( + false, + true, + client().prepareIndex(INDEX_1).setId("1").setSource("foo", "bar"), + client().prepareIndex(INDEX_1).setId("2").setSource("foo", "baz"), + client().prepareIndex(INDEX_2).setId("1").setSource("foo", "bar"), + client().prepareIndex(INDEX_2).setId("2").setSource("foo", "baz") + ); + + refresh(); + + // Search with custom plugin to ensure that queryTime is significant + client().prepareSearch(INDEX_1, INDEX_2) + .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", ScriptedDelayedPlugin.SCRIPT_NAME, Collections.emptyMap()))) + .execute() + .actionGet(); + client().prepareSearch(INDEX_1) + .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", ScriptedDelayedPlugin.SCRIPT_NAME, Collections.emptyMap()))) + .execute() + .actionGet(); + client().prepareSearch(INDEX_2) + .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", ScriptedDelayedPlugin.SCRIPT_NAME, Collections.emptyMap()))) + .execute() + .actionGet(); + + IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats(); + IndicesStatsResponse stats = builder.execute().actionGet(); + + assertEquals(4 * NUM_SHARDS, stats.getTotal().search.getTotal().getQueryCount()); + assertEquals(2 * NUM_SHARDS, stats.getTotal().search.getTotal().getConcurrentQueryCount()); + assertThat(stats.getTotal().search.getTotal().getQueryTimeInMillis(), greaterThan(0L)); + assertThat(stats.getTotal().search.getTotal().getConcurrentQueryTimeInMillis(), greaterThan(0L)); + assertThat( + stats.getTotal().search.getTotal().getConcurrentQueryTimeInMillis(), + lessThan(stats.getTotal().search.getTotal().getQueryTimeInMillis()) + ); + } + + /** + * Test average concurrency is correctly calculated across indices for the same node + */ + public void testAvgConcurrencyNodeLevel() throws InterruptedException { + int NUM_SHARDS = 1; + String INDEX_1 = "test-" + 
randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + String INDEX_2 = "test-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + + // Create index test1 with 4 segments + createIndex( + INDEX_1, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, NUM_SHARDS) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + ensureGreen(); + for (int i = 0; i < 4; i++) { + client().prepareIndex(INDEX_1).setId(Integer.toString(i)).setSource("field", "value" + i).get(); + refresh(); + } + + client().prepareSearch(INDEX_1).execute().actionGet(); + NodesStatsResponse nodesStatsResponse = client().admin().cluster().prepareNodesStats().execute().actionGet(); + + assertEquals(1, nodesStatsResponse.getNodes().size(), 0); + double expectedConcurrency = SEGMENT_SLICE_COUNT; + assertEquals( + SEGMENT_SLICE_COUNT, + nodesStatsResponse.getNodes().get(0).getIndices().getSearch().getTotal().getConcurrentAvgSliceCount(), + 0 + ); + + forceMerge(); + // Sleep to make sure force merge completes + Thread.sleep(1000); + client().prepareSearch(INDEX_1).execute().actionGet(); + + nodesStatsResponse = client().admin().cluster().prepareNodesStats().execute().actionGet(); + + assertEquals(1, nodesStatsResponse.getNodes().size(), 0); + expectedConcurrency = (SEGMENT_SLICE_COUNT + 1) / 2.0; + assertEquals( + expectedConcurrency, + nodesStatsResponse.getNodes().get(0).getIndices().getSearch().getTotal().getConcurrentAvgSliceCount(), + 0 + ); + + // Create second index test2 with 4 segments + createIndex( + INDEX_2, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, NUM_SHARDS) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + ensureGreen(); + for (int i = 0; i < 4; i++) { + client().prepareIndex(INDEX_2).setId(Integer.toString(i)).setSource("field", "value" + i).get(); + refresh(); + } + + client().prepareSearch(INDEX_2).execute().actionGet(); + nodesStatsResponse = client().admin().cluster().prepareNodesStats().execute().actionGet(); + + assertEquals(1, nodesStatsResponse.getNodes().size(), 0); + expectedConcurrency = (SEGMENT_SLICE_COUNT + 1 + SEGMENT_SLICE_COUNT) / 3.0; + assertEquals( + expectedConcurrency, + nodesStatsResponse.getNodes().get(0).getIndices().getSearch().getTotal().getConcurrentAvgSliceCount(), + 0 + ); + + forceMerge(); + // Sleep to make sure force merge completes + Thread.sleep(1000); + client().prepareSearch(INDEX_2).execute().actionGet(); + nodesStatsResponse = client().admin().cluster().prepareNodesStats().execute().actionGet(); + + assertEquals(1, nodesStatsResponse.getNodes().size(), 0); + expectedConcurrency = (SEGMENT_SLICE_COUNT + 1 + SEGMENT_SLICE_COUNT + 1) / 4.0; + assertEquals( + expectedConcurrency, + nodesStatsResponse.getNodes().get(0).getIndices().getSearch().getTotal().getConcurrentAvgSliceCount(), + 0 + ); + + // Check that non-concurrent search requests do not affect the average concurrency + client().admin() + .indices() + .prepareUpdateSettings(INDEX_1) + .setSettings(Settings.builder().put("search.concurrent_segment_search.enabled", false)) + .execute() + .actionGet(); + client().admin() + .indices() + .prepareUpdateSettings(INDEX_2) + .setSettings(Settings.builder().put("search.concurrent_segment_search.enabled", false)) + .execute() + .actionGet(); + client().prepareSearch(INDEX_1).execute().actionGet(); + client().prepareSearch(INDEX_2).execute().actionGet(); + assertEquals(1, nodesStatsResponse.getNodes().size(), 0); + assertEquals( + expectedConcurrency, + 
nodesStatsResponse.getNodes().get(0).getIndices().getSearch().getTotal().getConcurrentAvgSliceCount(), + 0 + ); + } + + /** + * Test average concurrency is correctly calculated across shard for the same index + */ + public void testAvgConcurrencyIndexLevel() throws InterruptedException { + int NUM_SHARDS = 2; + String INDEX = "test-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + createIndex( + INDEX, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, NUM_SHARDS) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + ensureGreen(); + // Create 4 segments on each shard + for (int i = 0; i < 4; i++) { + client().prepareIndex(INDEX).setId(Integer.toString(i)).setSource("field", "value" + i).setRouting("0").get(); + refresh(); + } + for (int i = 4; i < 8; i++) { + client().prepareIndex(INDEX).setId(Integer.toString(i)).setSource("field", "value" + i).setRouting("1").get(); + refresh(); + } + client().prepareSearch(INDEX).execute().actionGet(); + IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats().execute().actionGet(); + + IndexStats stats = indicesStatsResponse.getIndices().get(INDEX); + assertNotNull(stats); + double expectedConcurrency = (SEGMENT_SLICE_COUNT * NUM_SHARDS) / (double) NUM_SHARDS; + assertEquals(expectedConcurrency, stats.getTotal().getSearch().getTotal().getConcurrentAvgSliceCount(), 0); + + forceMerge(); + // Sleep to make sure force merge completes + Thread.sleep(1000); + client().prepareSearch(INDEX).execute().actionGet(); + + indicesStatsResponse = client().admin().indices().prepareStats().execute().actionGet(); + stats = indicesStatsResponse.getIndices().get(INDEX); + assertNotNull(stats); + expectedConcurrency = (SEGMENT_SLICE_COUNT * NUM_SHARDS + 1 * NUM_SHARDS) / (NUM_SHARDS * 2.0); + assertEquals(expectedConcurrency, stats.getTotal().getSearch().getTotal().getConcurrentAvgSliceCount(), 0); + + // Check that non-concurrent search requests do not affect the average concurrency + client().admin() + .indices() + .prepareUpdateSettings(INDEX) + .setSettings(Settings.builder().put("search.concurrent_segment_search.enabled", false)) + .execute() + .actionGet(); + + client().prepareSearch(INDEX).execute().actionGet(); + + indicesStatsResponse = client().admin().indices().prepareStats().execute().actionGet(); + stats = indicesStatsResponse.getIndices().get(INDEX); + assertNotNull(stats); + assertEquals(expectedConcurrency, stats.getTotal().getSearch().getTotal().getConcurrentAvgSliceCount(), 0); + } + + public void testThreadPoolWaitTime() throws Exception { + int NUM_SHARDS = 1; + String INDEX = "test-" + randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + createIndex( + INDEX, + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, NUM_SHARDS) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + + ensureGreen(); + + for (int i = 0; i < 10; i++) { + client().prepareIndex(INDEX).setId(Integer.toString(i)).setSource("field", "value" + i).get(); + refresh(); + } + + client().prepareSearch(INDEX).execute().actionGet(); + + NodesStatsRequestBuilder nodesStatsRequestBuilder = new NodesStatsRequestBuilder( + client().admin().cluster(), + NodesStatsAction.INSTANCE + ).setNodesIds().all(); + NodesStatsResponse nodesStatsResponse = nodesStatsRequestBuilder.execute().actionGet(); + ThreadPoolStats threadPoolStats = nodesStatsResponse.getNodes().get(0).getThreadPool(); + + for (ThreadPoolStats.Stats stats : threadPoolStats) { + if 
(stats.getName().equals(ThreadPool.Names.INDEX_SEARCHER)) { + assertThat(stats.getWaitTime().nanos(), greaterThan(0L)); + } + } + } + + public static class ScriptedDelayedPlugin extends MockScriptPlugin { + static final String SCRIPT_NAME = "search_timeout"; + + @Override + public Map<String, Function<Map<String, Object>, Object>> pluginScripts() { + return Collections.singletonMap(SCRIPT_NAME, params -> { + try { + Thread.sleep(500); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + return true; + }); + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java index c72b5d40553b3..253a8b2b14824 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java @@ -32,9 +32,12 @@ package org.opensearch.search.stats; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.search.SearchPhaseName; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.GroupShardsIterator; @@ -42,6 +45,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.search.stats.SearchStats.Stats; import org.opensearch.plugins.Plugin; @@ -50,7 +54,9 @@ import org.opensearch.script.ScriptType; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -58,9 +64,11 @@ import java.util.Set; import java.util.function.Function; +import static org.opensearch.action.search.TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED_KEY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @@ -72,8 +80,25 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -@OpenSearchIntegTestCase.ClusterScope(minNumDataNodes = 2) -public class SearchStatsIT extends OpenSearchIntegTestCase { +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, minNumDataNodes = 2) +public class SearchStatsIT extends ParameterizedOpenSearchIntegTestCase { + + public SearchStatsIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] {
Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection<Class<? extends Plugin>> nodePlugins() { @@ -103,6 +128,11 @@ public void testSimpleStats() throws Exception { assertThat(numNodes, greaterThanOrEqualTo(2)); final int shardsIdx1 = randomIntBetween(1, 10); // we make sure each node gets at least a single shard... final int shardsIdx2 = Math.max(numNodes - shardsIdx1, randomIntBetween(1, 10)); + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(SEARCH_REQUEST_STATS_ENABLED_KEY, true).build()) + .get(); assertThat(numNodes, lessThanOrEqualTo(shardsIdx1 + shardsIdx2)); assertAcked( prepareCreate("test1").setSettings( @@ -165,20 +195,40 @@ public void testSimpleStats() throws Exception { Set nodeIdsWithIndex = nodeIdsWithIndex("test1", "test2"); int num = 0; + int numOfCoordinators = 0; + for (NodeStats stat : nodeStats.getNodes()) { Stats total = stat.getIndices().getSearch().getTotal(); + if (total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.QUERY.getName()).getTimeInMillis() > 0) { + assertThat( + total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTimeInMillis(), + greaterThan(0L) + ); + assertEquals( + iters, + total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.FETCH.getName()).getTotal() + ); + assertEquals( + iters, + total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.EXPAND.getName()).getTotal() + ); + assertEquals( + iters, + total.getRequestStatsLongHolder().getRequestStatsHolder().get(SearchPhaseName.QUERY.getName()).getTotal() + ); + numOfCoordinators += 1; + } if (nodeIdsWithIndex.contains(stat.getNode().getId())) { assertThat(total.getQueryCount(), greaterThan(0L)); assertThat(total.getQueryTimeInMillis(), greaterThan(0L)); num++; } else { - assertThat(total.getQueryCount(), equalTo(0L)); + assertThat(total.getQueryCount(), greaterThanOrEqualTo(0L)); assertThat(total.getQueryTimeInMillis(), equalTo(0L)); } } - + assertThat(numOfCoordinators, greaterThan(0)); assertThat(num, greaterThan(0)); - } private Set nodeIdsWithIndex(String...
indices) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java index 7183f18acbadf..30dba87f8ef5d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java @@ -31,6 +31,7 @@ package org.opensearch.search.suggest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import org.apache.lucene.analysis.TokenStreamToAutomaton; @@ -47,6 +48,7 @@ import org.opensearch.common.FieldMemoryStats; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.MapperParsingException; @@ -63,7 +65,7 @@ import org.opensearch.search.suggest.completion.context.ContextMapping; import org.opensearch.search.suggest.completion.context.GeoContextMapping; import org.opensearch.test.InternalSettingsPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; @@ -81,6 +83,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.core.common.util.CollectionUtils.iterableAsArrayList; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAllSuccessful; import static org.opensearch.test.hamcrest.OpenSearchAssertions.hasId; @@ -96,7 +99,24 @@ import static org.hamcrest.Matchers.notNullValue; @SuppressCodecs("*") // requires custom completion format -public class CompletionSuggestSearchIT extends OpenSearchIntegTestCase { +public class CompletionSuggestSearchIT extends ParameterizedOpenSearchIntegTestCase { + public CompletionSuggestSearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + private final String INDEX = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); private final String FIELD = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); private final CompletionMappingBuilder completionMappingBuilder = new CompletionMappingBuilder(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java index 7f5e8abfc3b52..bac3e7fb61683 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/ContextCompletionSuggestSearchIT.java @@ -31,6 +31,7 @@ package org.opensearch.search.suggest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; @@ -40,6 +41,7 @@ import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; @@ -51,11 +53,12 @@ import org.opensearch.search.suggest.completion.context.ContextMapping; import org.opensearch.search.suggest.completion.context.GeoContextMapping; import org.opensearch.search.suggest.completion.context.GeoQueryContext; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; @@ -64,12 +67,29 @@ import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.core.IsEqual.equalTo; @SuppressCodecs("*") // requires custom completion format -public class ContextCompletionSuggestSearchIT extends OpenSearchIntegTestCase { +public class ContextCompletionSuggestSearchIT extends ParameterizedOpenSearchIntegTestCase { + public ContextCompletionSuggestSearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } private final String INDEX = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); private final String FIELD = RandomStrings.randomAsciiOfLength(random(), 10).toLowerCase(Locale.ROOT); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java index 017dd5ea668de..32bb0e34054bb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/SuggestSearchIT.java @@ -32,6 +32,8 @@ package org.opensearch.search.suggest; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.OpenSearchException; import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import 
org.opensearch.action.index.IndexRequestBuilder; @@ -39,6 +41,7 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; @@ -54,7 +57,7 @@ import org.opensearch.search.suggest.phrase.StupidBackoff; import org.opensearch.search.suggest.term.TermSuggestionBuilder; import org.opensearch.search.suggest.term.TermSuggestionBuilder.SuggestMode; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.IOException; @@ -73,6 +76,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.index.query.QueryBuilders.matchQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.suggest.SuggestBuilders.phraseSuggestion; import static org.opensearch.search.suggest.SuggestBuilders.termSuggestion; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -92,7 +96,23 @@ * possible these tests should declare for the first request, make the request, modify the configuration for the next request, make that * request, modify again, request again, etc. This makes it very obvious what changes between requests. */ -public class SuggestSearchIT extends OpenSearchIntegTestCase { +public class SuggestSearchIT extends ParameterizedOpenSearchIntegTestCase { + public SuggestSearchIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } // see #3196 public void testSuggestAcrossMultipleIndices() throws IOException { diff --git a/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java b/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java index 929aac388b678..8c9bff9833462 100644 --- a/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/similarity/SimilarityIT.java @@ -32,17 +32,41 @@ package org.opensearch.similarity; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchQuery; 
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; -public class SimilarityIT extends OpenSearchIntegTestCase { +public class SimilarityIT extends ParameterizedOpenSearchIntegTestCase { + public SimilarityIT(Settings settings) { + super(settings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + public void testCustomBM25Similarity() throws Exception { try { client().admin().indices().prepareDelete("test").execute().actionGet(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java index 8f8ce85a6e256..00e2d9bd92158 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java @@ -44,7 +44,6 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; @@ -57,7 +56,6 @@ import org.opensearch.repositories.RepositoryShardId; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.snapshots.mockstore.MockRepository; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; @@ -177,10 +175,10 @@ public void testCloneSnapshotIndex() throws Exception { public void testCloneShallowSnapshotIndex() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final String remoteStoreRepoName = "remote-store-repo-name"; - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName)); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); @@ -190,9 +188,6 @@ public void testCloneShallowSnapshotIndex() throws Exception { final Path shallowSnapshotRepoPath = randomRepoPath(); createRepository(shallowSnapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(shallowSnapshotRepoPath)); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); @@ -224,20 +219,20 @@ public void testCloneShallowSnapshotIndex() throws Exception { public void 
testShallowCloneNameAvailability() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final String remoteStoreRepoName = "remote-store-repo-name"; + final Path remoteStorePath = randomRepoPath().toAbsolutePath(); internalCluster().startClusterManagerOnlyNode( - Settings.builder().put(LARGE_SNAPSHOT_POOL_SETTINGS).put(remoteStoreClusterSettings(remoteStoreRepoName)).build() + Settings.builder() + .put(LARGE_SNAPSHOT_POOL_SETTINGS) + .put(remoteStoreClusterSettings(remoteStoreRepoName, remoteStorePath)) + .build() ); - internalCluster().startDataOnlyNode(); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStorePath)); final String shallowSnapshotRepoName = "shallow-snapshot-repo-name"; final Path shallowSnapshotRepoPath = randomRepoPath(); createRepository(shallowSnapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(shallowSnapshotRepoPath)); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); @@ -261,18 +256,15 @@ public void testShallowCloneNameAvailability() throws Exception { public void testCloneAfterRepoShallowSettingEnabled() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final String remoteStoreRepoName = "remote-store-repo-name"; - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName)); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "fs", snapshotRepoPath); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); @@ -298,18 +290,15 @@ public void testCloneAfterRepoShallowSettingEnabled() throws Exception { public void testCloneAfterRepoShallowSettingDisabled() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final String remoteStoreRepoName = "remote-store-repo-name"; - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName)); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 
10)); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java index b78506e0b2bec..864779f86caf3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java @@ -9,23 +9,27 @@ package org.opensearch.snapshots; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.test.FeatureFlagSetter; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Locale; +import java.util.concurrent.TimeUnit; import java.util.stream.Stream; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.comparesEqualTo; import static org.hamcrest.Matchers.is; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -35,17 +39,14 @@ public class DeleteSnapshotIT extends AbstractSnapshotIntegTestCase { public void testDeleteSnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "fs", snapshotRepoPath); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); @@ -65,16 +66,13 @@ public void testDeleteSnapshot() throws Exception { public void testDeleteShallowCopySnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); final String snapshotRepoName = "snapshot-repo-name"; createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy()); - final 
Path remoteStoreRepoPath = randomRepoPath(); - createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); @@ -96,16 +94,12 @@ public void testDeleteShallowCopySnapshot() throws Exception { // Deleting multiple shallow copy snapshots as part of single delete call with repo having only shallow copy snapshots. public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); final Client clusterManagerClient = internalCluster().clusterManagerClient(); ensureStableCluster(2); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); - final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); @@ -142,10 +136,9 @@ public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8610") public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); - final String dataNode = internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + final String dataNode = internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); ensureStableCluster(2); final String clusterManagerNode = internalCluster().getClusterManagerName(); @@ -155,9 +148,6 @@ public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { final String testIndex = "index-test"; createIndexWithContent(testIndex); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); - final String remoteStoreEnabledIndexName = "remote-index-1"; final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); @@ -230,22 +220,19 @@ public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8610") public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + 
internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); final Client clusterManagerClient = internalCluster().clusterManagerClient(); ensureStableCluster(2); final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); + final String testIndex = "index-test"; createIndexWithContent(testIndex); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); - final String remoteStoreEnabledIndexName = "remote-index-1"; final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); @@ -289,6 +276,67 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == 0); } + public void testRemoteStoreCleanupForDeletedIndex() throws Exception { + disableRepoConsistencyCheck("Remote store repository is being used in the test"); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + final Client clusterManagerClient = internalCluster().clusterManagerClient(); + ensureStableCluster(2); + + final String snapshotRepoName = "snapshot-repo-name"; + final Path snapshotRepoPath = randomRepoPath(); + createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); + + final String testIndex = "index-test"; + createIndexWithContent(testIndex); + + final String remoteStoreEnabledIndexName = "remote-index-1"; + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); + createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); + indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); + + String indexUUID = client().admin() + .indices() + .prepareGetSettings(remoteStoreEnabledIndexName) + .get() + .getSetting(remoteStoreEnabledIndexName, IndexMetadata.SETTING_INDEX_UUID); + + logger.info("--> create two remote index shallow snapshots"); + List<String> shallowCopySnapshots = createNSnapshots(snapshotRepoName, 2); + + String[] lockFiles = getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME); + assert (lockFiles.length == 2) : "lock files are " + Arrays.toString(lockFiles); + + // delete the remote store index + assertAcked(client().admin().indices().prepareDelete(remoteStoreEnabledIndexName)); + + logger.info("--> delete snapshot 1"); + AcknowledgedResponse deleteSnapshotResponse = clusterManagerClient.admin() + .cluster() + .prepareDeleteSnapshot(snapshotRepoName, shallowCopySnapshots.get(0)) + .get(); + assertAcked(deleteSnapshotResponse); + + lockFiles = getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME, indexUUID); + assert (lockFiles.length == 1) : "lock files are " + Arrays.toString(lockFiles); + + logger.info("--> delete snapshot 2"); + deleteSnapshotResponse = clusterManagerClient.admin() + .cluster() + .prepareDeleteSnapshot(snapshotRepoName, shallowCopySnapshots.get(1)) + .get(); + 
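// Illustrative note, not part of this diff: the cleanup verification that follows relies on
+ // assertBusy, which re-runs the lambda whenever it throws AssertionError, backing off until the
+ // 30-second budget elapses, so the asynchronous remote store deletion needs no fixed sleep:
+ //
+ //     assertBusy(
+ //         () -> assertThat(RemoteStoreBaseIntegTestCase.getFileCount(indexPath), comparesEqualTo(0)),
+ //         30,
+ //         TimeUnit.SECONDS
+ //     );
+ //
+ // The assertion below also swallows exceptions thrown by getFileCount, presumably because a missing
+ // index directory equally means the remote store files are gone.
+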
assertAcked(deleteSnapshotResponse); + + Path indexPath = Path.of(String.valueOf(remoteStoreRepoPath), indexUUID); + // Delete is async. Give time for it + assertBusy(() -> { + try { + assertThat(RemoteStoreBaseIntegTestCase.getFileCount(indexPath), comparesEqualTo(0)); + } catch (Exception e) {} + }, 30, TimeUnit.SECONDS); + } + private List createNSnapshots(String repoName, int count) { final List snapshotNames = new ArrayList<>(count); final String prefix = "snap-" + UUIDs.randomBase64UUID(random()).toLowerCase(Locale.ROOT) + "-"; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java index fb91b1d7a006c..8e2580aba1745 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java @@ -39,8 +39,9 @@ import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; +import org.junit.Before; import java.nio.file.Path; @@ -49,31 +50,34 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteIndexSnapshotStatusApiIT extends AbstractSnapshotIntegTestCase { + protected Path absolutePath; + final String remoteStoreRepoName = "remote-store-repo-name"; + + @Before + public void setup() { + absolutePath = randomRepoPath().toAbsolutePath(); + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0) // We have tests that check by-timestamp order - .put(FeatureFlags.REMOTE_STORE, "true") - .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .put(remoteStoreClusterSettings("remote-store-repo-name")) + .put(remoteStoreClusterSettings(remoteStoreRepoName, absolutePath)) .build(); } public void testStatusAPICallForShallowCopySnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used for the test"); internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); + internalCluster().startDataOnlyNodes(2); final String snapshotRepoName = "snapshot-repo-name"; createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy()); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String remoteStoreEnabledIndexName = "remote-index-1"; final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); @@ -104,15 +108,11 @@ public void testStatusAPICallForShallowCopySnapshot() throws Exception { public void testStatusAPIStatsForBackToBackShallowSnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used for the test"); internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); + internalCluster().startDataOnlyNodes(2); final 
String snapshotRepoName = "snapshot-repo-name"; createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy()); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String remoteStoreEnabledIndexName = "remote-index-1"; final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); @@ -152,15 +152,11 @@ public void testStatusAPIStatsForBackToBackShallowSnapshot() throws Exception { public void testStatusAPICallInProgressShallowSnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used for the test"); internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); + internalCluster().startDataOnlyNodes(2); final String snapshotRepoName = "snapshot-repo-name"; createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy().put("block_on_data", true)); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "mock", remoteStoreRepoPath); - final String remoteStoreEnabledIndexName = "remote-index-1"; final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java index 7327f1127eab2..7117818451e14 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java @@ -45,7 +45,6 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.rest.RestStatus; @@ -82,11 +81,6 @@ import static org.hamcrest.Matchers.nullValue; public class RestoreSnapshotIT extends AbstractSnapshotIntegTestCase { - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(FeatureFlags.REMOTE_STORE, "true").build(); - } - public void testParallelRestoreOperations() { String indexName1 = "testindex1"; String indexName2 = "testindex2"; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java index 62cbf171e8146..75c3a0a23de37 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -1884,7 +1884,7 @@ public void testCannotCreateSnapshotsWithSameName() throws Exception { * This test ensures that when a shard is removed from a node (perhaps due to the node * leaving the cluster, then returning), all snapshotting of that shard is aborted, so * all Store references held onto by the snapshot are released. - * + *
<p>
      * See https://github.com/elastic/elasticsearch/issues/20876 */ public void testSnapshotCanceledOnRemovedShard() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java index 30394611ac48f..c574233d25051 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java @@ -47,7 +47,6 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.Strings; import org.opensearch.core.common.unit.ByteSizeUnit; @@ -77,7 +76,6 @@ protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0) // We have tests that check by-timestamp order - .put(FeatureFlags.REMOTE_STORE, "true") .build(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java new file mode 100644 index 0000000000000..f50fc691fb232 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java @@ -0,0 +1,71 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.snapshots; + +import org.opensearch.client.Client; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.repositories.RepositoryException; +import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Before; + +import java.nio.file.Path; + +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SystemRepositoryIT extends AbstractSnapshotIntegTestCase { + protected Path absolutePath; + final String systemRepoName = "system-repo-name"; + + @Before + public void setup() { + absolutePath = randomRepoPath().toAbsolutePath(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(systemRepoName, absolutePath)) + .build(); + } + + public void testRestrictedSettingsCantBeUpdated() { + disableRepoConsistencyCheck("System repository is being used for the test"); + + internalCluster().startNode(); + final Client client = client(); + final Settings.Builder repoSettings = Settings.builder().put("location", randomRepoPath()); + + RepositoryException e = expectThrows( + RepositoryException.class, + () -> client.admin().cluster().preparePutRepository(systemRepoName).setType("mock").setSettings(repoSettings).get() + ); + assertEquals( + e.getMessage(), + "[system-repo-name] trying to modify an unmodifiable attribute type of system " + + "repository from 
current value [fs] to new value [mock]" + ); + } + + public void testSystemRepositoryNonRestrictedSettingsCanBeUpdated() { + disableRepoConsistencyCheck("System repository is being used for the test"); + + internalCluster().startNode(); + final Client client = client(); + final Settings.Builder repoSettings = Settings.builder().put("location", absolutePath).put("chunk_size", new ByteSizeValue(20)); + + assertAcked( + client.admin().cluster().preparePutRepository(systemRepoName).setType(FsRepository.TYPE).setSettings(repoSettings).get() + ); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java index 442268d513fc3..b46d27bafb2a5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/update/UpdateIT.java @@ -50,7 +50,7 @@ import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.action.ActionListener; import org.opensearch.core.rest.RestStatus; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.DocumentMissingException; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.plugins.Plugin; @@ -669,7 +669,7 @@ public void run() { public void testStressUpdateDeleteConcurrency() throws Exception { // We create an index with merging disabled so that deletes don't get merged away - assertAcked(prepareCreate("test").setSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false))); + assertAcked(prepareCreate("test").setSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false))); ensureGreen(); Script fieldIncScript = new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, Collections.singletonMap("field", "field")); diff --git a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java index c651689e21d3d..7f016caf22149 100644 --- a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java @@ -93,7 +93,7 @@ * provided the primaryTerm and seqNo still matches. The reason we cannot assume it will not take place after receiving the failure * is that a request can fork into two because of retries on disconnect, and now race against itself. The retry might complete (and do a * dirty or stale read) before the forked off request gets to execute, and that one might still subsequently succeed. - * + *
<p>
      * Such writes are not necessarily fully replicated and can be lost. There is no * guarantee that the previous value did not have the specified primaryTerm and seqNo *
    • CAS writes with other exceptions might or might not have taken place. If they have taken place, then after invocation but not diff --git a/server/src/main/java/org/apache/lucene/index/OneMergeHelper.java b/server/src/main/java/org/apache/lucene/index/OneMergeHelper.java index 8f67bb87b5c42..0ada02a09d157 100644 --- a/server/src/main/java/org/apache/lucene/index/OneMergeHelper.java +++ b/server/src/main/java/org/apache/lucene/index/OneMergeHelper.java @@ -59,8 +59,8 @@ public static double getMbPerSec(Thread thread, MergePolicy.OneMerge merge) { * Returns total bytes written by this merge. **/ public static long getTotalBytesWritten(Thread thread, MergePolicy.OneMerge merge) throws IOException { - /** - * TODO: The number of bytes written during the merge should be accessible in OneMerge. + /* + TODO: The number of bytes written during the merge should be accessible in OneMerge. */ if (thread instanceof ConcurrentMergeScheduler.MergeThread) { return ((ConcurrentMergeScheduler.MergeThread) thread).rateLimiter.getTotalBytesWritten(); diff --git a/server/src/main/java/org/apache/lucene/queryparser/classic/XQueryParser.java b/server/src/main/java/org/apache/lucene/queryparser/classic/XQueryParser.java index 9a8c295d60ec7..2f00ea69207a7 100644 --- a/server/src/main/java/org/apache/lucene/queryparser/classic/XQueryParser.java +++ b/server/src/main/java/org/apache/lucene/queryparser/classic/XQueryParser.java @@ -38,7 +38,7 @@ /** * This class is just a workaround to make {@link QueryParser#handleBareFuzzy(String, Token, String)} accessible by sub-classes. * It is needed for {@link QueryParser}s that need to override the parsing of the slop in a fuzzy query (e.g. word~2, word~). - * + *
<p>
      * TODO: We should maybe rewrite this with the flexible query parser which matches the same syntax with more freedom. */ public class XQueryParser extends QueryParser { diff --git a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java index e4c299ba572b1..9ca0491bc29f5 100644 --- a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java +++ b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java @@ -50,7 +50,7 @@ * A collector that groups documents based on field values and returns {@link CollapseTopFieldDocs} * output. The collapsing is done in a single pass by selecting only the top sorted document per collapse key. * The value used for the collapse key of each group can be found in {@link CollapseTopFieldDocs#collapseValues}. - * + *
<p>
      * TODO: If the sort is based on score we should propagate the minimum competitive score when orderedGroups is full. * This is safe for collapsing since the group sort is the same as the query sort. */ diff --git a/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java b/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java index 6fde39b16a59a..4edcdea42b53b 100644 --- a/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java +++ b/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java @@ -43,13 +43,13 @@ * If the {@link BreakIterator} cannot find a passage smaller than the maximum length, * a secondary break iterator is used to re-split the passage at the first boundary after * maximum length. - * + *
<p>
      * This is useful to split passages created by {@link BreakIterator}s like `sentence` that * can create big outliers on semi-structured text. - * + *
<p>
      * * WARNING: This break iterator is designed to work with the {@link UnifiedHighlighter}. - * + *
<p>
      * TODO: We should be able to create passages incrementally, starting from the offset of the first match and expanding or not * depending on the offsets of subsequent matches. This is currently impossible because {@link FieldHighlighter} uses * only the first matching offset to derive the start and end of each passage. diff --git a/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java b/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java index 4777b77cfbfed..9e9f6d1fd817d 100644 --- a/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java +++ b/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java @@ -38,8 +38,8 @@ /** * Forked from Lucene 8.x; removed in Lucene 9.0 * - * @todo further investigate a better alternative - * + * TODO: further investigate a better alternative + *
<p>
      * Space optimized random access capable array of values with a fixed number of bits/value. Values * are packed contiguously. * diff --git a/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java b/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java index 0324522e9a68d..53cf4ed8e2273 100644 --- a/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java +++ b/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java @@ -25,8 +25,8 @@ /** * Forked from Lucene 8.x; removed in Lucene 9.0 * - * @todo further investigate a better alternative - * + * TODO: further investigate a better alternative + *
<p>
      * This class is similar to {@link Packed64} except that it trades space for speed by ensuring that * a single block needs to be read/written in order to read/write a value. */ diff --git a/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java b/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java index f94a4531a7db9..4260d34ead7c9 100644 --- a/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java +++ b/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java @@ -35,9 +35,9 @@ /** * Forked from Lucene 8.x; removed in Lucene 8.9 - * + *
<p>
      * Todo: further investigate a better alternative - * + *
<p>
      * Simplistic compression for array of unsigned long values. Each value is {@code >= 0} and {@code * <=} a specified maximum value. The values are stored as packed ints, with each value consuming a * fixed number of bits. diff --git a/server/src/main/java/org/opensearch/OpenSearchServerException.java b/server/src/main/java/org/opensearch/OpenSearchServerException.java index e9b4ad38f7a52..10c5beb46092f 100644 --- a/server/src/main/java/org/opensearch/OpenSearchServerException.java +++ b/server/src/main/java/org/opensearch/OpenSearchServerException.java @@ -8,9 +8,12 @@ package org.opensearch; +import org.opensearch.crypto.CryptoRegistryException; + import static org.opensearch.OpenSearchException.OpenSearchExceptionHandle; import static org.opensearch.OpenSearchException.OpenSearchExceptionHandleRegistry.registerExceptionHandle; import static org.opensearch.OpenSearchException.UNKNOWN_VERSION_ADDED; +import static org.opensearch.Version.V_2_10_0; import static org.opensearch.Version.V_2_1_0; import static org.opensearch.Version.V_2_3_0; import static org.opensearch.Version.V_2_4_0; @@ -1175,6 +1178,7 @@ public static void registerExceptions() { V_2_7_0 ) ); + registerExceptionHandle(new OpenSearchExceptionHandle(CryptoRegistryException.class, CryptoRegistryException::new, 171, V_2_10_0)); registerExceptionHandle( new OpenSearchExceptionHandle( org.opensearch.cluster.block.IndexCreateBlockException.class, diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 7fbc4be4b6d06..7b0b725c88f64 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -972,12 +972,8 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestCatAction(catActions)); registerHandler.accept(new RestDecommissionAction()); registerHandler.accept(new RestGetDecommissionStateAction()); - - // Remote Store APIs - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - registerHandler.accept(new RestRemoteStoreStatsAction()); - registerHandler.accept(new RestRestoreRemoteStoreAction()); - } + registerHandler.accept(new RestRemoteStoreStatsAction()); + registerHandler.accept(new RestRestoreRemoteStoreAction()); } @Override diff --git a/server/src/main/java/org/opensearch/action/AliasesRequest.java b/server/src/main/java/org/opensearch/action/AliasesRequest.java index 4c5d5628b1aac..3632ba2d7304f 100644 --- a/server/src/main/java/org/opensearch/action/AliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/AliasesRequest.java @@ -54,7 +54,7 @@ public interface AliasesRequest extends IndicesRequest.Replaceable { /** * Replaces current aliases with the provided aliases. - * + *
<p>
      * Sometimes alias expressions need to be resolved to concrete aliases prior to executing the transport action. */ void replaceAliases(String... aliases); diff --git a/server/src/main/java/org/opensearch/action/DocWriteRequest.java b/server/src/main/java/org/opensearch/action/DocWriteRequest.java index e5979e93d7040..3ce5a26140ed3 100644 --- a/server/src/main/java/org/opensearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/opensearch/action/DocWriteRequest.java @@ -149,7 +149,7 @@ public interface DocWriteRequest<T> extends IndicesRequest, Accountable { /** * If set, only perform this request if the document's last modification was assigned this primary term. - * + *
<p>
      * If the document's last modification was assigned a different term, a * {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown. */ diff --git a/server/src/main/java/org/opensearch/action/DocWriteResponse.java b/server/src/main/java/org/opensearch/action/DocWriteResponse.java index afdb1d3a0bdd9..e3052b3b80035 100644 --- a/server/src/main/java/org/opensearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/opensearch/action/DocWriteResponse.java @@ -341,7 +341,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t /** * Parse the output of the {@link #innerToXContent(XContentBuilder, Params)} method. - * + *
<p>
      * This method is intended to be called by subclasses and must be called multiple times to parse all the information concerning * {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly * if needed and then immediately returns. diff --git a/server/src/main/java/org/opensearch/action/TaskOperationFailure.java b/server/src/main/java/org/opensearch/action/TaskOperationFailure.java index 0930bd2741810..5948dd3e2b7cb 100644 --- a/server/src/main/java/org/opensearch/action/TaskOperationFailure.java +++ b/server/src/main/java/org/opensearch/action/TaskOperationFailure.java @@ -50,7 +50,7 @@ /** * Information about task operation failures - * + *
<p>
      * The class is final due to serialization limitations * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java index 625aa91e6ea7f..3dec781f0acf4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java @@ -95,7 +95,7 @@ public ClusterAllocationExplainRequest(StreamInput in) throws IOException { * Create a new allocation explain request. If {@code primary} is false, the first unassigned replica * will be picked for explanation. If no replicas are unassigned, the first assigned replica will * be explained. - * + *
<p>
      * Package private for testing. */ ClusterAllocationExplainRequest(String index, int shard, boolean primary, @Nullable String currentNode) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java b/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java new file mode 100644 index 0000000000000..bd783b349bed4 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/crypto/CryptoSettings.java @@ -0,0 +1,181 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.crypto; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; + +import static org.opensearch.action.ValidateActions.addValidationError; +import static org.opensearch.common.settings.Settings.Builder.EMPTY_SETTINGS; +import static org.opensearch.common.settings.Settings.readSettingsFromStream; +import static org.opensearch.common.settings.Settings.writeSettingsToStream; + +/** + * Crypto settings supplied during a put repository request + * + * @opensearch.internal + */ +public class CryptoSettings implements Writeable, ToXContentObject { + private String keyProviderName; + private String keyProviderType; + private Settings settings = EMPTY_SETTINGS; + + public CryptoSettings(StreamInput in) throws IOException { + keyProviderName = in.readString(); + keyProviderType = in.readString(); + settings = readSettingsFromStream(in); + } + + public CryptoSettings(String keyProviderName) { + this.keyProviderName = keyProviderName; + } + + /** + * Validate settings supplied in put repository request. + * @return Exception in case validation fails. + */ + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (keyProviderName == null) { + validationException = addValidationError("key_provider_name is missing", validationException); + } + if (keyProviderType == null) { + validationException = addValidationError("key_provider_type is missing", validationException); + } + return validationException; + } + + /** + * Returns key provider name + * @return keyProviderName + */ + public String getKeyProviderName() { + return keyProviderName; + } + + /** + * Returns key provider type + * @return keyProviderType + */ + public String getKeyProviderType() { + return keyProviderType; + } + + /** + * Returns crypto settings + * @return settings + */ + public Settings getSettings() { + return settings; + } + + /** + * Constructs a new crypto settings with provided key provider name. + * @param keyProviderName Name of the key provider + */ + public CryptoSettings keyProviderName(String keyProviderName) { + this.keyProviderName = keyProviderName; + return this; + } + + /** + * Constructs a new crypto settings with provided key provider type. + * @param keyProviderType Type of key provider to be used in encryption. 
+ */ + public CryptoSettings keyProviderType(String keyProviderType) { + this.keyProviderType = keyProviderType; + return this; + } + + /** + * Sets the encryption settings + * + * @param settings for encryption + * @return this request + */ + public CryptoSettings settings(Settings.Builder settings) { + this.settings = settings.build(); + return this; + } + + /** + * Sets the encryption settings. + * + * @param source encryption settings in json or yaml format + * @param xContentType the content type of the source + * @return this request + */ + public CryptoSettings settings(String source, XContentType xContentType) { + this.settings = Settings.builder().loadFromSource(source, xContentType).build(); + return this; + } + + /** + * Sets the encryption settings. + * + * @param source encryption settings + * @return this request + */ + public CryptoSettings settings(Map source) { + this.settings = Settings.builder().loadFromMap(source).build(); + return this; + } + + /** + * Parses crypto settings definition. + * + * @param cryptoDefinition crypto settings definition + */ + public CryptoSettings(Map cryptoDefinition) { + for (Map.Entry entry : cryptoDefinition.entrySet()) { + if (entry.getKey().equals("key_provider_name")) { + keyProviderName(entry.getValue().toString()); + } else if (entry.getKey().equals("key_provider_type")) { + keyProviderType(entry.getValue().toString()); + } else if (entry.getKey().equals("settings")) { + if (!(entry.getValue() instanceof Map)) { + throw new IllegalArgumentException("Malformed settings section in crypto settings, should include an inner object"); + } + @SuppressWarnings("unchecked") + Map sub = (Map) entry.getValue(); + settings(sub); + } + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(keyProviderName); + out.writeString(keyProviderType); + writeSettingsToStream(settings, out); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("key_provider_name", keyProviderName); + builder.field("key_provider_type", keyProviderType); + + builder.startObject("settings"); + settings.toXContent(builder, params); + builder.endObject(); + + builder.endObject(); + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/crypto/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/crypto/package-info.java new file mode 100644 index 0000000000000..bb9375c20c87e --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/crypto/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Crypto client request and settings handlers. 
+ */ +package org.opensearch.action.admin.cluster.crypto; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index ce8033e971b44..58801c660e03f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -47,6 +47,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.discovery.DiscoveryStats; import org.opensearch.http.HttpStats; +import org.opensearch.index.SegmentReplicationRejectionStats; import org.opensearch.index.stats.IndexingPressureStats; import org.opensearch.index.stats.ShardIndexingPressureStats; import org.opensearch.index.store.remote.filecache.FileCacheStats; @@ -57,6 +58,8 @@ import org.opensearch.monitor.os.OsStats; import org.opensearch.monitor.process.ProcessStats; import org.opensearch.node.AdaptiveSelectionStats; +import org.opensearch.node.NodesResourceUsageStats; +import org.opensearch.repositories.RepositoriesStats; import org.opensearch.script.ScriptCacheStats; import org.opensearch.script.ScriptStats; import org.opensearch.search.backpressure.stats.SearchBackpressureStats; @@ -128,6 +131,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private SearchBackpressureStats searchBackpressureStats; + @Nullable + private SegmentReplicationRejectionStats segmentReplicationRejectionStats; + @Nullable private ClusterManagerThrottlingStats clusterManagerThrottlingStats; @@ -143,6 +149,12 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private SearchPipelineStats searchPipelineStats; + @Nullable + private NodesResourceUsageStats resourceUsageStats; + + @Nullable + private RepositoriesStats repositoriesStats; + public NodeStats(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); @@ -211,6 +223,21 @@ public NodeStats(StreamInput in) throws IOException { } else { searchPipelineStats = null; } + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + resourceUsageStats = in.readOptionalWriteable(NodesResourceUsageStats::new); + } else { + resourceUsageStats = null; + } + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + segmentReplicationRejectionStats = in.readOptionalWriteable(SegmentReplicationRejectionStats::new); + } else { + segmentReplicationRejectionStats = null; + } + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + repositoriesStats = in.readOptionalWriteable(RepositoriesStats::new); + } else { + repositoriesStats = null; + } } public NodeStats( @@ -229,6 +256,7 @@ public NodeStats( @Nullable DiscoveryStats discoveryStats, @Nullable IngestStats ingestStats, @Nullable AdaptiveSelectionStats adaptiveSelectionStats, + @Nullable NodesResourceUsageStats resourceUsageStats, @Nullable ScriptCacheStats scriptCacheStats, @Nullable IndexingPressureStats indexingPressureStats, @Nullable ShardIndexingPressureStats shardIndexingPressureStats, @@ -237,7 +265,9 @@ public NodeStats( @Nullable WeightedRoutingStats weightedRoutingStats, @Nullable FileCacheStats fileCacheStats, @Nullable TaskCancellationStats taskCancellationStats, - @Nullable SearchPipelineStats searchPipelineStats + @Nullable SearchPipelineStats searchPipelineStats, + @Nullable SegmentReplicationRejectionStats segmentReplicationRejectionStats, + @Nullable RepositoriesStats repositoriesStats ) { super(node); this.timestamp = 
timestamp; @@ -254,6 +284,7 @@ public NodeStats( this.discoveryStats = discoveryStats; this.ingestStats = ingestStats; this.adaptiveSelectionStats = adaptiveSelectionStats; + this.resourceUsageStats = resourceUsageStats; this.scriptCacheStats = scriptCacheStats; this.indexingPressureStats = indexingPressureStats; this.shardIndexingPressureStats = shardIndexingPressureStats; @@ -263,6 +294,8 @@ public NodeStats( this.fileCacheStats = fileCacheStats; this.taskCancellationStats = taskCancellationStats; this.searchPipelineStats = searchPipelineStats; + this.segmentReplicationRejectionStats = segmentReplicationRejectionStats; + this.repositoriesStats = repositoriesStats; } public long getTimestamp() { @@ -357,6 +390,11 @@ public AdaptiveSelectionStats getAdaptiveSelectionStats() { return adaptiveSelectionStats; } + @Nullable + public NodesResourceUsageStats getResourceUsageStats() { + return resourceUsageStats; + } + @Nullable public ScriptCacheStats getScriptCacheStats() { return scriptCacheStats; @@ -400,6 +438,16 @@ public SearchPipelineStats getSearchPipelineStats() { return searchPipelineStats; } + @Nullable + public SegmentReplicationRejectionStats getSegmentReplicationRejectionStats() { + return segmentReplicationRejectionStats; + } + + @Nullable + public RepositoriesStats getRepositoriesStats() { + return repositoriesStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -449,6 +497,15 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_9_0)) { out.writeOptionalWriteable(searchPipelineStats); } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(resourceUsageStats); + } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(segmentReplicationRejectionStats); + } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(repositoriesStats); + } } @Override @@ -539,7 +596,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (getSearchPipelineStats() != null) { getSearchPipelineStats().toXContent(builder, params); } - + if (getResourceUsageStats() != null) { + getResourceUsageStats().toXContent(builder, params); + } + if (getSegmentReplicationRejectionStats() != null) { + getSegmentReplicationRejectionStats().toXContent(builder, params); + } + if (getRepositoriesStats() != null) { + getRepositoriesStats().toXContent(builder, params); + } return builder; } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 5022427628647..fbc811263c042 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -244,7 +244,10 @@ public enum Metric { WEIGHTED_ROUTING_STATS("weighted_routing"), FILE_CACHE_STATS("file_cache"), TASK_CANCELLATION("task_cancellation"), - SEARCH_PIPELINE("search_pipeline"); + SEARCH_PIPELINE("search_pipeline"), + RESOURCE_USAGE_STATS("resource_usage_stats"), + SEGMENT_REPLICATION_BACKPRESSURE("segment_replication_backpressure"), + REPOSITORIES("repositories"); private String metricName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 2c1a61ea6e3b2..f93dd50e28a72 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -124,7 +124,10 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { NodesStatsRequest.Metric.WEIGHTED_ROUTING_STATS.containedIn(metrics), NodesStatsRequest.Metric.FILE_CACHE_STATS.containedIn(metrics), NodesStatsRequest.Metric.TASK_CANCELLATION.containedIn(metrics), - NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics) + NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics), + NodesStatsRequest.Metric.RESOURCE_USAGE_STATS.containedIn(metrics), + NodesStatsRequest.Metric.SEGMENT_REPLICATION_BACKPRESSURE.containedIn(metrics), + NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index 8d82827f4ee50..e62c83490d810 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -67,7 +67,7 @@ /** * ActionType to get a single task. If the task isn't running then it'll try to request the status from request index. - * + *
<p>
      * The general flow is: *
<ul>
        *
      • If this isn't being executed on the node to which the requested TaskId belongs then move to that node. diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java index aa8a8ec098ead..f292fcec7ccac 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java @@ -15,6 +15,7 @@ import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import java.io.IOException; @@ -29,15 +30,25 @@ public class RemoteStoreStats implements Writeable, ToXContentFragment { */ private final RemoteSegmentTransferTracker.Stats remoteSegmentShardStats; + /** + * Stats related to Remote Translog Store operations + */ + private final RemoteTranslogTransferTracker.Stats remoteTranslogShardStats; private final ShardRouting shardRouting; - public RemoteStoreStats(RemoteSegmentTransferTracker.Stats remoteSegmentUploadShardStats, ShardRouting shardRouting) { + RemoteStoreStats( + RemoteSegmentTransferTracker.Stats remoteSegmentUploadShardStats, + RemoteTranslogTransferTracker.Stats remoteTranslogShardStats, + ShardRouting shardRouting + ) { this.remoteSegmentShardStats = remoteSegmentUploadShardStats; + this.remoteTranslogShardStats = remoteTranslogShardStats; this.shardRouting = shardRouting; } - public RemoteStoreStats(StreamInput in) throws IOException { - this.remoteSegmentShardStats = in.readOptionalWriteable(RemoteSegmentTransferTracker.Stats::new); + RemoteStoreStats(StreamInput in) throws IOException { + remoteSegmentShardStats = in.readOptionalWriteable(RemoteSegmentTransferTracker.Stats::new); + remoteTranslogShardStats = in.readOptionalWriteable(RemoteTranslogTransferTracker.Stats::new); this.shardRouting = new ShardRouting(in); } @@ -49,10 +60,15 @@ public ShardRouting getShardRouting() { return shardRouting; } + public RemoteTranslogTransferTracker.Stats getTranslogStats() { + return remoteTranslogShardStats; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); buildShardRouting(builder); + builder.startObject(Fields.SEGMENT); builder.startObject(SubFields.DOWNLOAD); // Ensuring that we are not showing 0 metrics to the user @@ -67,15 +83,88 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endObject(); // segment.upload builder.endObject(); // segment + + builder.startObject(Fields.TRANSLOG); + builder.startObject(SubFields.UPLOAD); + // Ensuring that we are not showing 0 metrics to the user + if (remoteTranslogShardStats.totalUploadsStarted > 0) { + buildTranslogUploadStats(builder); + } + builder.endObject(); // translog.upload + builder.startObject(SubFields.DOWNLOAD); + // Ensuring that we are not showing 0 metrics to the user + if (remoteTranslogShardStats.totalDownloadsSucceeded > 0) { + buildTranslogDownloadStats(builder); + } + builder.endObject(); // translog.download + builder.endObject(); // translog + return builder.endObject(); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(remoteSegmentShardStats); + 
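// Illustrative note, not part of this diff: Writeable serialization is positional, so writeTo must
+ // emit values in exactly the order the StreamInput constructor above reads them back:
+ //
+ //     out.writeOptionalWriteable(remoteSegmentShardStats);   // pairs with readOptionalWriteable #1
+ //     out.writeOptionalWriteable(remoteTranslogShardStats);  // pairs with readOptionalWriteable #2
+ //     shardRouting.writeTo(out);                             // pairs with new ShardRouting(in)
+ //
+ // writeOptionalWriteable prefixes each value with a presence flag, so a shard whose translog tracker
+ // is null still round-trips safely across the wire.
+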
out.writeOptionalWriteable(remoteTranslogShardStats); shardRouting.writeTo(out); } + private void buildTranslogUploadStats(XContentBuilder builder) throws IOException { + builder.field(UploadStatsFields.LAST_SUCCESSFUL_UPLOAD_TIMESTAMP, remoteTranslogShardStats.lastSuccessfulUploadTimestamp); + + builder.startObject(UploadStatsFields.TOTAL_UPLOADS); + builder.field(SubFields.STARTED, remoteTranslogShardStats.totalUploadsStarted) + .field(SubFields.FAILED, remoteTranslogShardStats.totalUploadsFailed) + .field(SubFields.SUCCEEDED, remoteTranslogShardStats.totalUploadsSucceeded); + builder.endObject(); + + builder.startObject(UploadStatsFields.TOTAL_UPLOAD_SIZE); + builder.field(SubFields.STARTED_BYTES, remoteTranslogShardStats.uploadBytesStarted) + .field(SubFields.FAILED_BYTES, remoteTranslogShardStats.uploadBytesFailed) + .field(SubFields.SUCCEEDED_BYTES, remoteTranslogShardStats.uploadBytesSucceeded); + builder.endObject(); + + builder.field(UploadStatsFields.TOTAL_UPLOAD_TIME_IN_MILLIS, remoteTranslogShardStats.totalUploadTimeInMillis); + + builder.startObject(UploadStatsFields.UPLOAD_SIZE_IN_BYTES); + builder.field(SubFields.MOVING_AVG, remoteTranslogShardStats.uploadBytesMovingAverage); + builder.endObject(); + + builder.startObject(UploadStatsFields.UPLOAD_SPEED_IN_BYTES_PER_SEC); + builder.field(SubFields.MOVING_AVG, remoteTranslogShardStats.uploadBytesPerSecMovingAverage); + builder.endObject(); + + builder.startObject(UploadStatsFields.UPLOAD_TIME_IN_MILLIS); + builder.field(SubFields.MOVING_AVG, remoteTranslogShardStats.uploadTimeMovingAverage); + builder.endObject(); + } + + private void buildTranslogDownloadStats(XContentBuilder builder) throws IOException { + builder.field(DownloadStatsFields.LAST_SUCCESSFUL_DOWNLOAD_TIMESTAMP, remoteTranslogShardStats.lastSuccessfulDownloadTimestamp); + + builder.startObject(DownloadStatsFields.TOTAL_DOWNLOADS); + builder.field(SubFields.SUCCEEDED, remoteTranslogShardStats.totalDownloadsSucceeded); + builder.endObject(); + + builder.startObject(DownloadStatsFields.TOTAL_DOWNLOAD_SIZE); + builder.field(SubFields.SUCCEEDED_BYTES, remoteTranslogShardStats.downloadBytesSucceeded); + builder.endObject(); + + builder.field(DownloadStatsFields.TOTAL_DOWNLOAD_TIME_IN_MILLIS, remoteTranslogShardStats.totalDownloadTimeInMillis); + + builder.startObject(DownloadStatsFields.DOWNLOAD_SIZE_IN_BYTES); + builder.field(SubFields.MOVING_AVG, remoteTranslogShardStats.downloadBytesMovingAverage); + builder.endObject(); + + builder.startObject(DownloadStatsFields.DOWNLOAD_SPEED_IN_BYTES_PER_SEC); + builder.field(SubFields.MOVING_AVG, remoteTranslogShardStats.downloadBytesPerSecMovingAverage); + builder.endObject(); + + builder.startObject(DownloadStatsFields.DOWNLOAD_TIME_IN_MILLIS); + builder.field(SubFields.MOVING_AVG, remoteTranslogShardStats.downloadTimeMovingAverage); + builder.endObject(); + } + private void buildSegmentUploadStats(XContentBuilder builder) throws IOException { builder.field(UploadStatsFields.LOCAL_REFRESH_TIMESTAMP, remoteSegmentShardStats.localRefreshClockTimeMs) .field(UploadStatsFields.REMOTE_REFRESH_TIMESTAMP, remoteSegmentShardStats.remoteRefreshClockTimeMs) @@ -84,21 +173,21 @@ private void buildSegmentUploadStats(XContentBuilder builder) throws IOException .field(UploadStatsFields.BYTES_LAG, remoteSegmentShardStats.bytesLag) .field(UploadStatsFields.BACKPRESSURE_REJECTION_COUNT, remoteSegmentShardStats.rejectionCount) .field(UploadStatsFields.CONSECUTIVE_FAILURE_COUNT, remoteSegmentShardStats.consecutiveFailuresCount); - 
builder.startObject(UploadStatsFields.TOTAL_SYNCS_TO_REMOTE) + builder.startObject(UploadStatsFields.TOTAL_UPLOADS) .field(SubFields.STARTED, remoteSegmentShardStats.totalUploadsStarted) .field(SubFields.SUCCEEDED, remoteSegmentShardStats.totalUploadsSucceeded) .field(SubFields.FAILED, remoteSegmentShardStats.totalUploadsFailed); builder.endObject(); - builder.startObject(UploadStatsFields.TOTAL_UPLOADS_IN_BYTES) - .field(SubFields.STARTED, remoteSegmentShardStats.uploadBytesStarted) - .field(SubFields.SUCCEEDED, remoteSegmentShardStats.uploadBytesSucceeded) - .field(SubFields.FAILED, remoteSegmentShardStats.uploadBytesFailed); + builder.startObject(UploadStatsFields.TOTAL_UPLOAD_SIZE) + .field(SubFields.STARTED_BYTES, remoteSegmentShardStats.uploadBytesStarted) + .field(SubFields.SUCCEEDED_BYTES, remoteSegmentShardStats.uploadBytesSucceeded) + .field(SubFields.FAILED_BYTES, remoteSegmentShardStats.uploadBytesFailed); builder.endObject(); builder.startObject(UploadStatsFields.REMOTE_REFRESH_SIZE_IN_BYTES) .field(SubFields.LAST_SUCCESSFUL, remoteSegmentShardStats.lastSuccessfulRemoteRefreshBytes) .field(SubFields.MOVING_AVG, remoteSegmentShardStats.uploadBytesMovingAverage); builder.endObject(); - builder.startObject(UploadStatsFields.UPLOAD_LATENCY_IN_BYTES_PER_SEC) + builder.startObject(UploadStatsFields.UPLOAD_SPEED_IN_BYTES_PER_SEC) .field(SubFields.MOVING_AVG, remoteSegmentShardStats.uploadBytesPerSecMovingAverage); builder.endObject(); builder.startObject(UploadStatsFields.REMOTE_REFRESH_LATENCY_IN_MILLIS) @@ -111,10 +200,10 @@ private void buildSegmentDownloadStats(XContentBuilder builder) throws IOExcepti DownloadStatsFields.LAST_SYNC_TIMESTAMP, remoteSegmentShardStats.directoryFileTransferTrackerStats.lastTransferTimestampMs ); - builder.startObject(DownloadStatsFields.TOTAL_DOWNLOADS_IN_BYTES) - .field(SubFields.STARTED, remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesStarted) - .field(SubFields.SUCCEEDED, remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesSucceeded) - .field(SubFields.FAILED, remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesFailed); + builder.startObject(DownloadStatsFields.TOTAL_DOWNLOAD_SIZE) + .field(SubFields.STARTED_BYTES, remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesStarted) + .field(SubFields.SUCCEEDED_BYTES, remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesSucceeded) + .field(SubFields.FAILED_BYTES, remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesFailed); builder.endObject(); builder.startObject(DownloadStatsFields.DOWNLOAD_SIZE_IN_BYTES) .field(SubFields.LAST_SUCCESSFUL, remoteSegmentShardStats.directoryFileTransferTrackerStats.lastSuccessfulTransferInBytes) @@ -133,6 +222,9 @@ private void buildShardRouting(XContentBuilder builder) throws IOException { builder.endObject(); } + /** + * Fields for remote store stats response + */ static final class Fields { static final String ROUTING = "routing"; static final String SEGMENT = "segment"; @@ -148,7 +240,7 @@ static final class RoutingFields { /** * Fields for remote store stats response */ - static final class UploadStatsFields { + public static final class UploadStatsFields { /** * Lag in terms of bytes b/w local and remote store */ @@ -185,63 +277,107 @@ static final class UploadStatsFields { static final String CONSECUTIVE_FAILURE_COUNT = "consecutive_failure_count"; /** - * Represents the number of remote refreshes + * Represents the size of new data 
to be uploaded as part of a refresh */ - static final String TOTAL_SYNCS_TO_REMOTE = "total_syncs_to_remote"; + static final String REMOTE_REFRESH_SIZE_IN_BYTES = "remote_refresh_size_in_bytes"; + + /** + * Time taken by a single remote refresh + */ + static final String REMOTE_REFRESH_LATENCY_IN_MILLIS = "remote_refresh_latency_in_millis"; + + /** + * Timestamp of last successful remote store upload + */ + static final String LAST_SUCCESSFUL_UPLOAD_TIMESTAMP = "last_successful_upload_timestamp"; + + /** + * Count of files uploaded to remote store + */ + public static final String TOTAL_UPLOADS = "total_uploads"; /** * Represents the total uploads to remote store in bytes */ - static final String TOTAL_UPLOADS_IN_BYTES = "total_uploads_in_bytes"; + public static final String TOTAL_UPLOAD_SIZE = "total_upload_size"; /** - * Represents the size of new data to be uploaded as part of a refresh + * Total time spent on remote store uploads */ - static final String REMOTE_REFRESH_SIZE_IN_BYTES = "remote_refresh_size_in_bytes"; + static final String TOTAL_UPLOAD_TIME_IN_MILLIS = "total_upload_time_in_millis"; + + /** + * Represents the size of new data to be transferred as part of a remote store upload + */ + static final String UPLOAD_SIZE_IN_BYTES = "upload_size_in_bytes"; /** * Represents the speed of remote store uploads in bytes per sec */ - static final String UPLOAD_LATENCY_IN_BYTES_PER_SEC = "upload_latency_in_bytes_per_sec"; + static final String UPLOAD_SPEED_IN_BYTES_PER_SEC = "upload_speed_in_bytes_per_sec"; /** - * Time taken by a single remote refresh + * Time taken by a remote store upload */ - static final String REMOTE_REFRESH_LATENCY_IN_MILLIS = "remote_refresh_latency_in_millis"; + static final String UPLOAD_TIME_IN_MILLIS = "upload_time_in_millis"; } static final class DownloadStatsFields { + /** + * Epoch timestamp of the last successful download + */ + public static final String LAST_SUCCESSFUL_DOWNLOAD_TIMESTAMP = "last_successful_download_timestamp"; + /** * Last successful sync from remote in milliseconds */ static final String LAST_SYNC_TIMESTAMP = "last_sync_timestamp"; /** - * Total bytes of segment files downloaded from the remote store for a specific shard + * Count of files downloaded from remote store + */ + public static final String TOTAL_DOWNLOADS = "total_downloads"; + + /** + * Total time spent in downloads from remote store + */ + public static final String TOTAL_DOWNLOAD_TIME_IN_MILLIS = "total_download_time_in_millis"; + + /** + * Total bytes of files downloaded from the remote store */ - static final String TOTAL_DOWNLOADS_IN_BYTES = "total_downloads_in_bytes"; + static final String TOTAL_DOWNLOAD_SIZE = "total_download_size"; /** - * Size of each segment file downloaded from the remote store + * Average size of a file downloaded from the remote store */ static final String DOWNLOAD_SIZE_IN_BYTES = "download_size_in_bytes"; /** - * Speed (in bytes/sec) for segment file downloads + * Average speed (in bytes/sec) of a remote store download */ static final String DOWNLOAD_SPEED_IN_BYTES_PER_SEC = "download_speed_in_bytes_per_sec"; + + /** + * Average time spent on a remote store download + */ + public static final String DOWNLOAD_TIME_IN_MILLIS = "download_time_in_millis"; } /** * Reusable sub fields for {@link UploadStatsFields} and {@link DownloadStatsFields} */ - static final class SubFields { - static final String STARTED = "started"; - static final String SUCCEEDED = "succeeded"; - static final String FAILED = "failed"; + public static final class 
SubFields { + public static final String STARTED = "started"; + public static final String SUCCEEDED = "succeeded"; + public static final String FAILED = "failed"; + + public static final String STARTED_BYTES = "started_bytes"; + public static final String SUCCEEDED_BYTES = "succeeded_bytes"; + public static final String FAILED_BYTES = "failed_bytes"; static final String DOWNLOAD = "download"; - static final String UPLOAD = "upload"; + public static final String UPLOAD = "upload"; /** * Moving avg over last N values stat diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsAction.java index d05879aa1ae78..bd8db4a160bf6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsAction.java @@ -24,7 +24,8 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.IndexService; import org.opensearch.index.remote.RemoteSegmentTransferTracker; -import org.opensearch.index.remote.RemoteStorePressureService; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.indices.IndicesService; @@ -50,7 +51,7 @@ public class TransportRemoteStoreStatsAction extends TransportBroadcastByNodeAct private final IndicesService indicesService; - private final RemoteStorePressureService remoteStorePressureService; + private final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; @Inject public TransportRemoteStoreStatsAction( @@ -59,7 +60,7 @@ public TransportRemoteStoreStatsAction( IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - RemoteStorePressureService remoteStorePressureService + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) { super( RemoteStoreStatsAction.NAME, @@ -71,7 +72,7 @@ public TransportRemoteStoreStatsAction( ThreadPool.Names.MANAGEMENT ); this.indicesService = indicesService; - this.remoteStorePressureService = remoteStorePressureService; + this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory; } /** @@ -153,10 +154,15 @@ protected RemoteStoreStats shardOperation(RemoteStoreStatsRequest request, Shard throw new ShardNotFoundException(indexShard.shardId()); } - RemoteSegmentTransferTracker remoteSegmentTransferTracker = remoteStorePressureService.getRemoteRefreshSegmentTracker( + RemoteSegmentTransferTracker remoteSegmentTransferTracker = remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker( indexShard.shardId() ); assert Objects.nonNull(remoteSegmentTransferTracker); - return new RemoteStoreStats(remoteSegmentTransferTracker.stats(), indexShard.routingEntry()); + RemoteTranslogTransferTracker remoteTranslogTransferTracker = remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker( + indexShard.shardId() + ); + assert Objects.nonNull(remoteTranslogTransferTracker); + + return new RemoteStoreStats(remoteSegmentTransferTracker.stats(), remoteTranslogTransferTracker.stats(), indexShard.routingEntry()); } } diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index fa22daf2ca038..b9a4b3f2e24f8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -69,7 +69,7 @@ /** * Repository cleanup action for repository implementations based on {@link BlobStoreRepository}. - * + *
<p>
        * The steps taken by the repository cleanup operation are as follows: *
<ol>
          *
        1. Check that there are no running repository cleanup, snapshot create, or snapshot delete actions diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java index ac975e917e056..6fb086f65497e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java @@ -88,7 +88,7 @@ public ActionRequestValidationException validate() { /** * The names of the repositories. * - * @return list of repositories + * @return array of repository names */ public String[] repositories() { return this.repositories; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java index 098a0e60142e7..f8c8df25be532 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java @@ -42,8 +42,8 @@ import org.opensearch.core.xcontent.XContentParser; import java.io.IOException; -import java.util.Collections; import java.util.List; +import java.util.Map; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; @@ -83,7 +83,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); repositories.toXContent( builder, - new DelegatingMapParams(Collections.singletonMap(RepositoriesMetadata.HIDE_GENERATIONS_PARAM, "true"), params) + new DelegatingMapParams( + Map.of(RepositoriesMetadata.HIDE_GENERATIONS_PARAM, "true", RepositoriesMetadata.HIDE_SYSTEM_REPOSITORY_SETTING, "true"), + params + ) ); builder.endObject(); return builder; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index ad6be17821337..582f73f335b49 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -32,7 +32,9 @@ package org.opensearch.action.admin.cluster.repositories.put; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.admin.cluster.crypto.CryptoSettings; import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; @@ -67,12 +69,18 @@ public class PutRepositoryRequest extends AcknowledgedRequest repositoryDefinition) { @SuppressWarnings("unchecked") Map sub = (Map) entry.getValue(); settings(sub); + } else if (name.equals("crypto_settings")) { + if (!(entry.getValue() instanceof Map)) { + throw new IllegalArgumentException("Malformed encryption_settings section, should include an inner object"); + } + @SuppressWarnings("unchecked") + Map sub = (Map) entry.getValue(); + CryptoSettings cryptoSettings = new CryptoSettings(sub); + cryptoSettings(cryptoSettings); } } return this; @@ -236,6 +275,9 @@ public void writeTo(StreamOutput out) 
throws IOException { out.writeString(type); writeSettingsToStream(settings, out); out.writeBoolean(verify); + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { + out.writeOptionalWriteable(cryptoSettings); + } } @Override @@ -249,6 +291,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); builder.field("verify", verify); + + if (cryptoSettings != null) { + builder.startObject("crypto_settings"); + cryptoSettings.toXContent(builder, params); + builder.endObject(); + } + builder.endObject(); return builder; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java index 6e1b2795b6375..cf649ee6b4cbf 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.repositories.put; +import org.opensearch.action.admin.cluster.crypto.CryptoSettings; import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; @@ -141,4 +142,15 @@ public PutRepositoryRequestBuilder setVerify(boolean verify) { request.verify(verify); return this; } + + /** + * Sets the repository encryption settings + * + * @param cryptoSettings repository crypto settings builder + * @return this builder + */ + public PutRepositoryRequestBuilder setEncryptionSettings(CryptoSettings cryptoSettings) { + request.cryptoSettings(cryptoSettings); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index 644f23d2bafe6..1eadab6b1352e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -100,7 +100,7 @@ protected void clusterManagerOperation( ClusterState state, final ActionListener listener ) { - repositoriesService.registerRepository( + repositoriesService.registerOrUpdateRepository( request, ActionListener.delegateFailure( listener, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 7796353d81c5b..a1afaedf47fa4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -266,7 +266,7 @@ public CreateSnapshotRequest indices(List indices) { /** * Returns a list of indices that should be included into the snapshot * - * @return list of indices + * @return array of index names */ @Override public String[] indices() { @@ -321,7 +321,7 @@ public CreateSnapshotRequest partial(boolean partial) { /** * If set to true the operation should wait for the snapshot completion before returning. - * + *
<p>
          * By default, the operation will return as soon as snapshot is initialized. It can be changed by setting this * flag to true. * diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 895219985c811..678df866ad8d3 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -40,7 +40,6 @@ import org.opensearch.common.Nullable; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -158,7 +157,7 @@ public RestoreSnapshotRequest(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_2_7_0)) { storageType = in.readEnum(StorageType.class); } - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE) && in.getVersion().onOrAfter(Version.V_2_9_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { sourceRemoteStoreRepository = in.readOptionalString(); } } @@ -191,7 +190,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_7_0)) { out.writeEnum(storageType); } - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE) && out.getVersion().onOrAfter(Version.V_2_9_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { out.writeOptionalString(sourceRemoteStoreRepository); } } @@ -513,7 +512,7 @@ public Settings indexSettings() { * this is the snapshot that this request restores. If the client can only identify a snapshot by its name then there is a risk that the * desired snapshot may be deleted and replaced by a new snapshot with the same name which is inconsistent with the original one. This * method lets us fail the restore if the precise snapshot we want is not available. - * + *
<p>
          * This is for internal use only and is not exposed in the REST layer. */ public RestoreSnapshotRequest snapshotUuid(String snapshotUuid) { @@ -631,11 +630,6 @@ public RestoreSnapshotRequest source(Map source) { } } else if (name.equals("source_remote_store_repository")) { - if (!FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - throw new IllegalArgumentException( - "Unsupported parameter " + name + ". Please enable remote store feature flag for this experimental feature" - ); - } if (entry.getValue() instanceof String) { setSourceRemoteStoreRepository((String) entry.getValue()); } else { @@ -686,7 +680,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (storageType != null) { storageType.toXContent(builder); } - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE) && sourceRemoteStoreRepository != null) { + if (sourceRemoteStoreRepository != null) { builder.field("source_remote_store_repository", sourceRemoteStoreRepository); } builder.endObject(); @@ -716,48 +710,29 @@ public boolean equals(Object o) { && Objects.equals(indexSettings, that.indexSettings) && Arrays.equals(ignoreIndexSettings, that.ignoreIndexSettings) && Objects.equals(snapshotUuid, that.snapshotUuid) - && Objects.equals(storageType, that.storageType); - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - equals = Objects.equals(sourceRemoteStoreRepository, that.sourceRemoteStoreRepository); - } + && Objects.equals(storageType, that.storageType) + && Objects.equals(sourceRemoteStoreRepository, that.sourceRemoteStoreRepository); return equals; } @Override public int hashCode() { int result; - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - result = Objects.hash( - snapshot, - repository, - indicesOptions, - renamePattern, - renameReplacement, - waitForCompletion, - includeGlobalState, - partial, - includeAliases, - indexSettings, - snapshotUuid, - storageType, - sourceRemoteStoreRepository - ); - } else { - result = Objects.hash( - snapshot, - repository, - indicesOptions, - renamePattern, - renameReplacement, - waitForCompletion, - includeGlobalState, - partial, - includeAliases, - indexSettings, - snapshotUuid, - storageType - ); - } + result = Objects.hash( + snapshot, + repository, + indicesOptions, + renamePattern, + renameReplacement, + waitForCompletion, + includeGlobalState, + partial, + includeAliases, + indexSettings, + snapshotUuid, + storageType, + sourceRemoteStoreRepository + ); result = 31 * result + Arrays.hashCode(indices); result = 31 * result + Arrays.hashCode(ignoreIndexSettings); return result; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index a189c632a9a22..58ff1a5741069 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -168,6 +168,9 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq false, false, false, + false, + false, + false, false ); List shardsStats = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java index 7cda041b7d9cf..814a65e2a5bf0 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java @@ -81,7 +81,7 @@ public Builder addIndices(String... indices) { /** * Specifies what type of requested indices to ignore and wildcard indices expressions. - * + *
<p>
          * For example indices that don't exist. */ @SuppressWarnings("unchecked") diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index 72b65138f59c9..76e0e1651a168 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -247,7 +247,7 @@ public CreateIndexRequest settings(Map source) { /** * Set the mapping for this index - * + *
<p>
          * The mapping should be in the form of a JSON string, with an outer _doc key *
<pre>
*     .mapping("{\"_doc\":{\"properties\": ... }}")
 * </pre>
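(A minimal usage sketch of the _doc-keyed mapping form described above; the index name "logs" and the "message" field are invented for illustration, assuming the builder API shown in this hunk:)

    import org.opensearch.action.admin.indices.create.CreateIndexRequest;
    import org.opensearch.common.xcontent.XContentType;

    // The mapping source is a JSON string whose single outer key is "_doc".
    CreateIndexRequest request = new CreateIndexRequest("logs")
        .mapping("{\"_doc\":{\"properties\":{\"message\":{\"type\":\"text\"}}}}", XContentType.JSON);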
          @@ -273,7 +273,7 @@ public CreateIndexRequest mapping(String source, XContentType xContentType) {
           
               /**
                * Adds mapping that will be added when the index gets created.
          -     *
+     * <p>
          * Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -300,7 +300,7 @@ private CreateIndexRequest mapping(BytesReference source, XContentType xContentT /** * Adds mapping that will be added when the index gets created. - * + *
<p>
          * Note that the definition should *not* be nested under a type name. * * @param source The mapping source @@ -436,7 +436,7 @@ public CreateIndexRequest source(String source, XContentType xContentType) { /** * Sets the settings and mappings as a single source. - * + *
<p>
          * Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(String source, MediaType mediaType) { @@ -462,7 +462,7 @@ public CreateIndexRequest source(byte[] source, XContentType xContentType) { /** * Sets the settings and mappings as a single source. - * + *
<p>
          * Note that the mapping definition should *not* be nested under a type name. */ public CreateIndexRequest source(byte[] source, MediaType mediaType) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java index 0cff01af536dc..384ae2e028bba 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java @@ -130,7 +130,7 @@ protected void writeNodesTo(StreamOutput out, List * NOTE: visible for testing * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java index e3c812aedcfe7..6a1d04efa9714 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsRequest.java @@ -46,7 +46,7 @@ /** * Request the mappings of specific fields - * + *
<p>
          * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should go to that client class as well. * diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java index d874b5bb6b1ac..94c88e30295a8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java @@ -61,7 +61,7 @@ /** * Response object for {@link GetFieldMappingsRequest} API - * + *
<p>
          * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should go to that client class as well. * diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java index 1e780373e51df..b2c1083df1671 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -216,7 +216,7 @@ public String source() { /** * A specialized simplified mapping source method, takes the form of simple properties definition: * ("field1", "type=string,store=true"). - * + *
<p>
          * Also supports metadata mapping fields such as `_all` and `_parent` as property definition, these metadata * mapping fields will automatically be put on the top level mapping object. */ diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java index c46940fbfecf9..b82b68f6f9489 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java @@ -89,7 +89,7 @@ public void activeOnly(boolean activeOnly) { /** * Contains list of shard id's if shards are passed, empty otherwise. Array is empty by default. * - * @return list of shard id's if shards are passed, empty otherwise + * @return array of shard id's if shards are passed, empty otherwise */ public String[] shards() { return shards; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java index b25bc94a5c8e2..353cdbbbc840c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java @@ -54,7 +54,7 @@ /** * Request class to swap index under an alias or increment data stream generation upon satisfying conditions - * + *
<p>
          * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should also go to that client class. * diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java index 55ee65d0a4973..a66fcc9e9bcf2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java @@ -51,7 +51,7 @@ /** * Response object for {@link RolloverRequest} API - * + *
<p>
          * Note: there is a new class with the same name for the Java HLRC that uses a typeless format. * Any changes done to this class should also go to that client class. * diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java index 6715a8d4dc05b..23cd8efdcaf59 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java @@ -66,6 +66,8 @@ import java.util.Set; import java.util.function.IntFunction; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; + /** * Main class to initiate resizing (shrink / split) an index into a new index * @@ -138,25 +140,78 @@ protected void clusterManagerOperation( // there is no need to fetch docs stats for split but we keep it simple and do it anyway for simplicity of the code final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getSourceIndex()); final String targetIndex = indexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getTargetIndexRequest().index()); - client.admin() - .indices() - .prepareStats(sourceIndex) - .clear() - .setDocs(true) - .setStore(true) - .execute(ActionListener.delegateFailure(listener, (delegatedListener, indicesStatsResponse) -> { - CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(resizeRequest, state, i -> { - IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i); - return shard == null ? null : shard.getPrimary().getDocs(); - }, indicesStatsResponse.getPrimaries().store, sourceIndex, targetIndex); - createIndexService.createIndex( - updateRequest, - ActionListener.map( - delegatedListener, - response -> new ResizeResponse(response.isAcknowledged(), response.isShardsAcknowledged(), updateRequest.index()) - ) - ); - })); + + IndexMetadata indexMetadata = state.metadata().index(sourceIndex); + if (resizeRequest.getResizeType().equals(ResizeType.SHRINK) + && state.metadata().isSegmentReplicationEnabled(sourceIndex) + && indexMetadata != null + && Integer.valueOf(indexMetadata.getSettings().get(SETTING_NUMBER_OF_REPLICAS)) > 0) { + client.admin() + .indices() + .prepareRefresh(sourceIndex) + .execute(ActionListener.delegateFailure(listener, (delegatedRefreshListener, refreshResponse) -> { + client.admin() + .indices() + .prepareStats(sourceIndex) + .clear() + .setDocs(true) + .setStore(true) + .setSegments(true) + .execute(ActionListener.delegateFailure(listener, (delegatedIndicesStatsListener, indicesStatsResponse) -> { + CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(resizeRequest, state, i -> { + IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i); + return shard == null ? null : shard.getPrimary().getDocs(); + }, indicesStatsResponse.getPrimaries().store, sourceIndex, targetIndex); + + if (indicesStatsResponse.getIndex(sourceIndex) + .getTotal() + .getSegments() + .getReplicationStats().maxBytesBehind != 0) { + throw new IllegalStateException( + " For index [" + + sourceIndex + + "] replica shards haven't caught up with primary, please retry after sometime." 
+ ); + } + + createIndexService.createIndex( + updateRequest, + ActionListener.map( + delegatedIndicesStatsListener, + response -> new ResizeResponse( + response.isAcknowledged(), + response.isShardsAcknowledged(), + updateRequest.index() + ) + ) + ); + })); + })); + } else { + client.admin() + .indices() + .prepareStats(sourceIndex) + .clear() + .setDocs(true) + .setStore(true) + .execute(ActionListener.delegateFailure(listener, (delegatedListener, indicesStatsResponse) -> { + CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(resizeRequest, state, i -> { + IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i); + return shard == null ? null : shard.getPrimary().getDocs(); + }, indicesStatsResponse.getPrimaries().store, sourceIndex, targetIndex); + createIndexService.createIndex( + updateRequest, + ActionListener.map( + delegatedListener, + response -> new ResizeResponse( + response.isAcknowledged(), + response.isShardsAcknowledged(), + updateRequest.index() + ) + ) + ); + })); + } } diff --git a/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java b/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java index 0d6d122e31261..25a2c081f8441 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java +++ b/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java @@ -40,9 +40,9 @@ /** * Provides a backoff policy for bulk requests. Whenever a bulk request is rejected due to resource constraints (i.e. the client's internal * thread pool is full), the backoff policy decides how long the bulk processor will wait before the operation is retried internally. - * + *
<p>
          * Notes for implementing custom subclasses: - * + *
<p>
          * The underlying mathematical principle of BackoffPolicy are progressions which can be either finite or infinite although * the latter should not be used for retrying. A progression can be mapped to a java.util.Iterator with the following * semantics: @@ -241,7 +241,7 @@ private static class ExponentialEqualJitterBackoffIterator implements Iterator * NOTE: If the value is greater than 30, there can be integer overflow * issues during delay calculation. **/ diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java index 72e22f6b72019..5fdd2305fcf2f 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkItemResponse.java @@ -210,7 +210,7 @@ public static class Failure implements Writeable, ToXContentFragment { /** * For write failures before operation was assigned a sequence number. - * + *
<p>
          * use @{link {@link #Failure(String, String, Exception, long, long)}} * to record operation sequence no with failure */ diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java index baf64b3e80af6..141ec24fc390f 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java @@ -185,7 +185,7 @@ public Builder setGlobalPipeline(String globalPipeline) { /** * Sets a custom backoff policy. The backoff policy defines how the bulk processor should handle retries of bulk requests internally * in case they have failed due to resource constraints (i.e. a thread pool was full). - * + *
<p>
          * The default is to back off exponentially. * * @see org.opensearch.action.bulk.BackoffPolicy#exponentialBackoff() diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java index 65043da6c2684..f2f3077001a13 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java @@ -67,7 +67,7 @@ /** * A bulk request holds an ordered {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s * and allows to executes it in a single batch. - * + *
<p>
          * Note that we only support refresh on the bulk request not per item. * @see org.opensearch.client.Client#bulk(BulkRequest) * @@ -123,7 +123,7 @@ public BulkRequest add(DocWriteRequest... requests) { /** * Add a request to the current BulkRequest. - * + *
<p>
          * Note for internal callers: This method does not respect all global parameters. * Only the global index is applied to the request objects. * Global parameters would be respected if the request was serialized for a REST call as it is @@ -347,7 +347,7 @@ public final BulkRequest timeout(TimeValue timeout) { /** * Note for internal callers (NOT high level rest client), * the global parameter setting is ignored when used with: - * + *
<p>
          * - {@link BulkRequest#add(IndexRequest)} * - {@link BulkRequest#add(UpdateRequest)} * - {@link BulkRequest#add(DocWriteRequest)} @@ -364,7 +364,7 @@ public final BulkRequest pipeline(String globalPipeline) { /** * Note for internal callers (NOT high level rest client), * the global parameter setting is ignored when used with: - * + *
<p>
          - {@link BulkRequest#add(IndexRequest)} - {@link BulkRequest#add(UpdateRequest)} - {@link BulkRequest#add(DocWriteRequest)} @@ -404,7 +404,7 @@ public Boolean requireAlias() { /** * Note for internal callers (NOT high level rest client), * the global parameter setting is ignored when used with: - * + *
<p>
          * - {@link BulkRequest#add(IndexRequest)} * - {@link BulkRequest#add(UpdateRequest)} * - {@link BulkRequest#add(DocWriteRequest)} diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java index 96c2b8f7d0576..9796afe28f8a8 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java @@ -79,11 +79,18 @@ import org.opensearch.index.IndexingPressureService; import org.opensearch.index.VersionType; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; import org.opensearch.indices.IndexClosedException; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.ingest.IngestService; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportService; @@ -130,7 +137,9 @@ public class TransportBulkAction extends HandledTransportAction> entry : requestsByShard.entrySet()) { final ShardId shardId = entry.getKey(); final List requests = entry.getValue(); @@ -629,47 +657,66 @@ protected void doRun() { bulkShardRequest::ramBytesUsed, isOnlySystem ); - shardBulkAction.execute(bulkShardRequest, ActionListener.runBefore(new ActionListener() { - @Override - public void onResponse(BulkShardResponse bulkShardResponse) { - for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) { - // we may have no response if item failed - if (bulkItemResponse.getResponse() != null) { - bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); + + final Span span = tracer.startSpan(SpanBuilder.from("bulkShardAction", nodeId, bulkShardRequest)); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + shardBulkAction.execute( + bulkShardRequest, + TraceableActionListener.create(ActionListener.runBefore(new ActionListener() { + @Override + public void onResponse(BulkShardResponse bulkShardResponse) { + for (BulkItemResponse bulkItemResponse : bulkShardResponse.getResponses()) { + // we may have no response if item failed + if (bulkItemResponse.getResponse() != null) { + bulkItemResponse.getResponse().setShardInfo(bulkShardResponse.getShardInfo()); + } + + docStatusStats.inc(bulkItemResponse.status()); + responses.set(bulkItemResponse.getItemId(), bulkItemResponse); + } + + if (counter.decrementAndGet() == 0) { + finishHim(); + } } - responses.set(bulkItemResponse.getItemId(), bulkItemResponse); - } - if (counter.decrementAndGet() == 0) { - finishHim(); - } - } - @Override - public void onFailure(Exception e) { - // create failures for all relevant requests - for (BulkItemRequest request : requests) { - final String indexName = concreteIndices.getConcreteIndex(request.index()).getName(); - DocWriteRequest docWriteRequest = request.request(); - responses.set( - request.id(), - new BulkItemResponse( - request.id(), - docWriteRequest.opType(), - new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e) - ) - ); - } 
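(The rewritten handlers in this hunk fold per-item REST status accounting into the shard listener before it completes. A hedged sketch of that pattern as a standalone helper — the name countingListener is invented:)

    import org.opensearch.action.bulk.BulkItemResponse;
    import org.opensearch.core.action.ActionListener;
    import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats;

    // Invented helper: record each item's REST status, then delegate to the wrapped listener.
    static ActionListener<BulkItemResponse> countingListener(DocStatusStats stats, ActionListener<BulkItemResponse> delegate) {
        return ActionListener.wrap(response -> {
            stats.inc(response.status()); // same accounting the hunk adds in onResponse/onFailure
            delegate.onResponse(response);
        }, delegate::onFailure);
    }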
- if (counter.decrementAndGet() == 0) { - finishHim(); - } - } + @Override + public void onFailure(Exception e) { + // create failures for all relevant requests + for (BulkItemRequest request : requests) { + final String indexName = concreteIndices.getConcreteIndex(request.index()).getName(); + final DocWriteRequest docWriteRequest = request.request(); + final BulkItemResponse bulkItemResponse = new BulkItemResponse( + request.id(), + docWriteRequest.opType(), + new BulkItemResponse.Failure(indexName, docWriteRequest.id(), e) + ); + + docStatusStats.inc(bulkItemResponse.status()); + responses.set(request.id(), bulkItemResponse); + } - private void finishHim() { - listener.onResponse( - new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)) - ); - } - }, releasable::close)); + if (counter.decrementAndGet() == 0) { + finishHim(); + } + } + + private void finishHim() { + indicesService.addDocStatusStats(docStatusStats); + listener.onResponse( + new BulkResponse( + responses.toArray(new BulkItemResponse[responses.length()]), + buildTookInMillis(startTimeNanos) + ) + ); + } + }, releasable::close), span, tracer) + ); + } catch (Exception e) { + span.setError(e); + span.endSpan(); + throw e; + } } bulkRequest = null; // allow memory for bulk request items to be reclaimed before all items have been completed } @@ -771,6 +818,10 @@ void executeBulk( final AtomicArray responses, Map indicesThatCannotBeCreated ) { + /* + * We are not wrapping the listener here to capture the response codes for performance benefits. It will + * be saving us an iteration over the responses array + */ new BulkOperation(task, bulkRequest, listener, responses, startTimeNanos, indicesThatCannotBeCreated).run(); } diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index efc21b2c03808..268a6ed6f85b8 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -72,7 +72,6 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.action.ActionListener; @@ -100,6 +99,7 @@ import org.opensearch.indices.SystemIndices; import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportChannel; @@ -162,7 +162,8 @@ public TransportShardBulkAction( IndexingPressureService indexingPressureService, SegmentReplicationPressureService segmentReplicationPressureService, RemoteStorePressureService remoteStorePressureService, - SystemIndices systemIndices + SystemIndices systemIndices, + Tracer tracer ) { super( settings, @@ -178,7 +179,8 @@ public TransportShardBulkAction( EXECUTOR_NAME_FUNCTION, false, indexingPressureService, - systemIndices + systemIndices, + tracer ); this.updateHelper = updateHelper; this.mappingUpdatedAction = mappingUpdatedAction; @@ -539,7 +541,7 @@ protected Releasable checkPrimaryLimits(BulkShardRequest request, boolean rerout } // TODO - While removing 
remote store flag, this can be encapsulated to single class with common interface for backpressure // service - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE) && remoteStorePressureService.isSegmentsUploadBackpressureEnabled()) { + if (remoteStorePressureService.isSegmentsUploadBackpressureEnabled()) { remoteStorePressureService.validateSegmentsUploadLag(request.shardId()); } } diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java index cb8195b09593f..43bd6e8617737 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java @@ -209,7 +209,7 @@ public long ifSeqNo() { /** * If set, only perform this delete request if the document was last modification was assigned this primary term. - * + *
<p>
          * If the document last modification was assigned a different term a * {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown. */ diff --git a/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java b/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java index 039214459ac21..6cbabfec6d763 100644 --- a/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java +++ b/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java @@ -40,7 +40,7 @@ /** * Performs the delete operation. - * + *
<p>
          * Deprecated use TransportBulkAction with a single item instead * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/get/TransportGetAction.java b/server/src/main/java/org/opensearch/action/get/TransportGetAction.java index 583815b91ae68..00a795c86356f 100644 --- a/server/src/main/java/org/opensearch/action/get/TransportGetAction.java +++ b/server/src/main/java/org/opensearch/action/get/TransportGetAction.java @@ -36,8 +36,8 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.single.shard.TransportSingleShardAction; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.routing.Preference; import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.service.ClusterService; @@ -49,12 +49,10 @@ import org.opensearch.index.get.GetResult; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; -import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import java.io.IOException; -import java.util.Optional; /** * Performs the get operation. @@ -92,20 +90,11 @@ protected boolean resolveIndex(GetRequest request) { return true; } - static boolean isSegmentReplicationEnabled(ClusterState state, String indexName) { - return Optional.ofNullable(state.getMetadata().index(indexName)) - .map( - indexMetadata -> ReplicationType.parseString(indexMetadata.getSettings().get(IndexMetadata.SETTING_REPLICATION_TYPE)) - .equals(ReplicationType.SEGMENT) - ) - .orElse(false); - } - /** * Returns true if GET request should be routed to primary shards, else false. 
*/ - protected static boolean shouldForcePrimaryRouting(ClusterState state, boolean realtime, String preference, String indexName) { - return isSegmentReplicationEnabled(state, indexName) && realtime && preference == null; + protected static boolean shouldForcePrimaryRouting(Metadata metadata, boolean realtime, String preference, String indexName) { + return metadata.isSegmentReplicationEnabled(indexName) && realtime && preference == null; } @Override @@ -113,7 +102,12 @@ protected ShardIterator shards(ClusterState state, InternalRequest request) { final String preference; // route realtime GET requests when segment replication is enabled to primary shards, // iff there are no other preferences/routings enabled for routing to a specific shard - if (shouldForcePrimaryRouting(state, request.request().realtime, request.request().preference(), request.concreteIndex())) { + if (shouldForcePrimaryRouting( + state.getMetadata(), + request.request().realtime, + request.request().preference(), + request.concreteIndex() + )) { preference = Preference.PRIMARY.type(); } else { preference = request.request().preference(); diff --git a/server/src/main/java/org/opensearch/action/get/TransportMultiGetAction.java b/server/src/main/java/org/opensearch/action/get/TransportMultiGetAction.java index a1a74208dc725..8bbfef381aea8 100644 --- a/server/src/main/java/org/opensearch/action/get/TransportMultiGetAction.java +++ b/server/src/main/java/org/opensearch/action/get/TransportMultiGetAction.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockLevel; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.routing.Preference; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -51,8 +52,6 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; -import static org.opensearch.action.get.TransportGetAction.shouldForcePrimaryRouting; - /** * Perform the multi get action. 
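(Both transport actions now share this routing rule; restated as a hedged sketch, with metadata, realtime, preference, and indexName as in the hunks above:)

    // Realtime GETs are forced to the primary only when the index uses segment
    // replication and the caller supplied no explicit preference.
    if (metadata.isSegmentReplicationEnabled(indexName) && realtime && preference == null) {
        preference = Preference.PRIMARY.type();
    }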
* @@ -78,6 +77,10 @@ public TransportMultiGetAction( this.indexNameExpressionResolver = resolver; } + protected static boolean shouldForcePrimaryRouting(Metadata metadata, boolean realtime, String preference, String indexName) { + return metadata.isSegmentReplicationEnabled(indexName) && realtime && preference == null; + } + @Override protected void doExecute(Task task, final MultiGetRequest request, final ActionListener listener) { ClusterState clusterState = clusterService.state(); @@ -112,7 +115,7 @@ protected void doExecute(Task task, final MultiGetRequest request, final ActionL MultiGetShardRequest shardRequest = shardRequests.get(shardId); if (shardRequest == null) { - if (shouldForcePrimaryRouting(clusterState, request.realtime, request.preference, concreteSingleIndex)) { + if (shouldForcePrimaryRouting(clusterState.getMetadata(), request.realtime(), request.preference(), concreteSingleIndex)) { request.preference(Preference.PRIMARY.type()); } shardRequest = new MultiGetShardRequest(request, shardId.getIndexName(), shardId.getId()); diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequest.java b/server/src/main/java/org/opensearch/action/index/IndexRequest.java index 2500496103415..3b0261349744a 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequest.java @@ -76,14 +76,14 @@ /** * Index request to index a typed JSON document into a specific index and make it searchable. Best * created using {@link org.opensearch.client.Requests#indexRequest(String)}. - * + *
<p>
          * The index requires the {@link #index()}, {@link #id(String)} and * {@link #source(byte[], MediaType)} to be set. - * + *
<p>
          * The source (content to index) can be set in its bytes form using ({@link #source(byte[], MediaType)}), * its string form ({@link #source(String, MediaType)}) or using a {@link XContentBuilder} * ({@link #source(XContentBuilder)}). - * + *
<p>
          * If the {@link #id(String)} is not set, it will be automatically generated. * * @see IndexResponse @@ -399,7 +399,7 @@ public IndexRequest source(Map source, MediaType contentType) throws /** * Sets the document source to index. - * + *
<p>
          * Note, its preferable to either set it using {@link #source(XContentBuilder)} * or using the {@link #source(byte[], MediaType)}. */ @@ -602,7 +602,7 @@ public long ifSeqNo() { /** * If set, only perform this indexing request if the document was last modification was assigned this primary term. - * + *
<p>
          * If the document last modification was assigned a different term a * {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown. */ diff --git a/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java b/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java index fe4f80bf0c065..ce32840f6751b 100644 --- a/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java +++ b/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java @@ -40,7 +40,7 @@ /** * Performs the index operation. - * + *
<p>
          * Allows for the following settings: *
<ul>
            *
          • autoCreateIndex: When set to {@code true}, will automatically create an index if one does not exists. diff --git a/server/src/main/java/org/opensearch/action/ingest/IngestActionForwarder.java b/server/src/main/java/org/opensearch/action/ingest/IngestActionForwarder.java index 9927affbc7442..2821f4fd7fadb 100644 --- a/server/src/main/java/org/opensearch/action/ingest/IngestActionForwarder.java +++ b/server/src/main/java/org/opensearch/action/ingest/IngestActionForwarder.java @@ -46,7 +46,7 @@ /** * A utility for forwarding ingest requests to ingest nodes in a round-robin fashion. - * + *
<p>
            * TODO: move this into IngestService and make index/bulk actions call that * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java index 2234934499609..ec3ee981b646f 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java @@ -218,7 +218,12 @@ private static List parseDocs(Map config) { String routing = ConfigurationUtils.readOptionalStringOrIntProperty(null, null, dataMap, Metadata.ROUTING.getFieldName()); Long version = null; if (dataMap.containsKey(Metadata.VERSION.getFieldName())) { - version = (Long) ConfigurationUtils.readObject(null, null, dataMap, Metadata.VERSION.getFieldName()); + Object versionFieldValue = ConfigurationUtils.readObject(null, null, dataMap, Metadata.VERSION.getFieldName()); + if (versionFieldValue instanceof Integer || versionFieldValue instanceof Long) { + version = ((Number) versionFieldValue).longValue(); + } else { + throw new IllegalArgumentException("Failed to parse parameter [_version], only int or long is accepted"); + } } VersionType versionType = null; if (dataMap.containsKey(Metadata.VERSION_TYPE.getFieldName())) { @@ -228,12 +233,25 @@ private static List parseDocs(Map config) { } IngestDocument ingestDocument = new IngestDocument(index, id, routing, version, versionType, document); if (dataMap.containsKey(Metadata.IF_SEQ_NO.getFieldName())) { - Long ifSeqNo = (Long) ConfigurationUtils.readObject(null, null, dataMap, Metadata.IF_SEQ_NO.getFieldName()); - ingestDocument.setFieldValue(Metadata.IF_SEQ_NO.getFieldName(), ifSeqNo); + Object ifSeqNoFieldValue = ConfigurationUtils.readObject(null, null, dataMap, Metadata.IF_SEQ_NO.getFieldName()); + if (ifSeqNoFieldValue instanceof Integer || ifSeqNoFieldValue instanceof Long) { + ingestDocument.setFieldValue(Metadata.IF_SEQ_NO.getFieldName(), ((Number) ifSeqNoFieldValue).longValue()); + } else { + throw new IllegalArgumentException("Failed to parse parameter [_if_seq_no], only int or long is accepted"); + } } if (dataMap.containsKey(Metadata.IF_PRIMARY_TERM.getFieldName())) { - Long ifPrimaryTerm = (Long) ConfigurationUtils.readObject(null, null, dataMap, Metadata.IF_PRIMARY_TERM.getFieldName()); - ingestDocument.setFieldValue(Metadata.IF_PRIMARY_TERM.getFieldName(), ifPrimaryTerm); + Object ifPrimaryTermFieldValue = ConfigurationUtils.readObject( + null, + null, + dataMap, + Metadata.IF_PRIMARY_TERM.getFieldName() + ); + if (ifPrimaryTermFieldValue instanceof Integer || ifPrimaryTermFieldValue instanceof Long) { + ingestDocument.setFieldValue(Metadata.IF_PRIMARY_TERM.getFieldName(), ((Number) ifPrimaryTermFieldValue).longValue()); + } else { + throw new IllegalArgumentException("Failed to parse parameter [_if_primary_term], only int or long is accepted"); + } } ingestDocumentList.add(ingestDocument); } diff --git a/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java index 032fe83e2220b..9d60706d1f100 100644 --- a/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/resync/TransportResyncReplicationAction.java @@ -54,6 +54,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import 
org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportException; @@ -93,7 +94,8 @@ public TransportResyncReplicationAction( ShardStateAction shardStateAction, ActionFilters actionFilters, IndexingPressureService indexingPressureService, - SystemIndices systemIndices + SystemIndices systemIndices, + Tracer tracer ) { super( settings, @@ -109,7 +111,8 @@ public TransportResyncReplicationAction( EXECUTOR_NAME_FUNCTION, true, /* we should never reject resync because of thread pool capacity on primary */ indexingPressureService, - systemIndices + systemIndices, + tracer ); } diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index ee8aa10577956..14f57218ae1dc 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -65,6 +65,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; @@ -107,7 +108,6 @@ abstract class AbstractSearchAsyncAction exten private final AtomicInteger skippedOps = new AtomicInteger(); private final TransportSearchAction.SearchTimeProvider timeProvider; private final SearchResponse.Clusters clusters; - protected final GroupShardsIterator toSkipShardsIts; protected final GroupShardsIterator shardsIts; private final int expectedTotalOps; @@ -116,8 +116,12 @@ abstract class AbstractSearchAsyncAction exten private final Map pendingExecutionsPerNode = new ConcurrentHashMap<>(); private final boolean throttleConcurrentRequests; + private SearchPhase currentPhase; + private final List releasables = new ArrayList<>(); + private Optional searchRequestOperationsListener; + AbstractSearchAsyncAction( String name, Logger logger, @@ -135,7 +139,8 @@ abstract class AbstractSearchAsyncAction exten SearchTask task, SearchPhaseResults resultConsumer, int maxConcurrentRequestsPerNode, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ) { super(name); final List toSkipIterators = new ArrayList<>(); @@ -171,6 +176,7 @@ abstract class AbstractSearchAsyncAction exten this.indexRoutings = indexRoutings; this.results = resultConsumer; this.clusters = clusters; + this.searchRequestOperationsListener = Optional.ofNullable(searchRequestOperationsListener); } @Override @@ -209,6 +215,7 @@ public final void start() { 0, 0, buildTookInMillis(), + timeProvider.getPhaseTook(), ShardSearchFailure.EMPTY_ARRAY, clusters, null @@ -371,6 +378,7 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha : OpenSearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; logger.debug(() -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()), cause); onPhaseFailure(currentPhase, "all shards failed", cause); + } else { Boolean allowPartialResults = request.allowPartialSearchResults(); assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults"; @@ -419,13 +427,24 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha clusterState.version() ); } + 
onPhaseEnd(); executePhase(nextPhase); } } + private void onPhaseEnd() { + this.searchRequestOperationsListener.ifPresent(searchRequestOperations -> { searchRequestOperations.onPhaseEnd(this); }); + } + + private void onPhaseStart(SearchPhase phase) { + setCurrentPhase(phase); + this.searchRequestOperationsListener.ifPresent(searchRequestOperations -> { searchRequestOperations.onPhaseStart(this); }); + } + private void executePhase(SearchPhase phase) { try { - phase.run(); + onPhaseStart(phase); + phase.recordAndRun(); } catch (Exception e) { if (logger.isDebugEnabled()) { logger.debug(new ParameterizedMessage("Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), e); @@ -603,6 +622,14 @@ private void successfulShardExecution(SearchShardIterator shardsIt) { } } + public SearchPhase getCurrentPhase() { + return currentPhase; + } + + private void setCurrentPhase(SearchPhase phase) { + currentPhase = phase; + } + @Override public final int getNumShards() { return results.getNumShards(); @@ -636,6 +663,7 @@ protected final SearchResponse buildSearchResponse( successfulOps.get(), skippedOps.get(), buildTookInMillis(), + timeProvider.getPhaseTook(), failures, clusters, searchContextId @@ -670,10 +698,13 @@ public void sendSearchResponse(InternalSearchResponse internalSearchResponse, At } listener.onResponse(buildSearchResponse(internalSearchResponse, failures, scrollId, searchContextId)); } + onPhaseEnd(); + setCurrentPhase(null); } @Override public final void onPhaseFailure(SearchPhase phase, String msg, Throwable cause) { + this.searchRequestOperationsListener.ifPresent(searchRequestOperations -> searchRequestOperations.onPhaseFailure(this)); raisePhaseFailure(new SearchPhaseExecutionException(phase.getName(), msg, cause, buildShardFailures())); } diff --git a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java index 6c3ee652de2de..ae481736ad0aa 100644 --- a/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/CanMatchPreFilterSearchPhase.java @@ -90,7 +90,8 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction, SearchPhase> phaseFactory, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ) { // We set max concurrent shard requests to the number of shards so no throttling happens for can_match requests super( @@ -110,7 +111,8 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction { executeUpdatePitId(request, searchRequest, searchResponse, updatePitIdListener); }, @@ -167,9 +167,9 @@ void executeUpdatePitId( searchResponse.pointInTimeId() ) ); - /** - * store the create time ( same create time for all PIT contexts across shards ) to be used - * for list PIT api + /* + store the create time ( same create time for all PIT contexts across shards ) to be used + for list PIT api */ final long relativeStartNanos = System.nanoTime(); final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java index da8f8f144eaf2..00e0345062d1c 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java +++ 
b/server/src/main/java/org/opensearch/action/search/MultiSearchRequest.java @@ -277,6 +277,8 @@ public static void readMultiLineFormat( } else if ("cancel_after_time_interval".equals(entry.getKey()) || "cancelAfterTimeInterval".equals(entry.getKey())) { searchRequest.setCancelAfterTimeInterval(nodeTimeValue(value, null)); + } else if ("phase_took".equals(entry.getKey())) { + searchRequest.setPhaseTook(nodeBooleanValue(value)); + } else { throw new IllegalArgumentException("key [" + entry.getKey() + "] is not supported in the metadata section"); } @@ -374,6 +376,9 @@ public static void writeSearchRequestParams(SearchRequest request, XContentBuild if (request.getCancelAfterTimeInterval() != null) { xContentBuilder.field("cancel_after_time_interval", request.getCancelAfterTimeInterval().getStringRep()); } + if (request.isPhaseTook() != null) { + xContentBuilder.field("phase_took", request.isPhaseTook()); + } xContentBuilder.endObject(); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 31e9a10b9fba3..eca9646ee6b7c 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -76,10 +76,11 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> final TransportSearchAction.SearchTimeProvider timeProvider, final ClusterState clusterState, final SearchTask task, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ) { super( - "dfs", + SearchPhaseName.DFS_PRE_QUERY.getName(), logger, searchTransportService, nodeIdToConnection, @@ -95,7 +96,8 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> task, new ArraySearchPhaseResults<>(shardsIts.size()), request.getMaxConcurrentShardRequests(), - clusters + clusters, + searchRequestOperationsListener ); this.queryPhaseResultConsumer = queryPhaseResultConsumer; this.searchPhaseController = searchPhaseController; diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhase.java b/server/src/main/java/org/opensearch/action/search/SearchPhase.java index 50b0cd8e01c1d..1c7b3c1f1563c 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhase.java @@ -42,13 +42,23 @@ * * @opensearch.internal */ -abstract class SearchPhase implements CheckedRunnable<IOException> { +public abstract class SearchPhase implements CheckedRunnable<IOException> { private final String name; + private long startTimeInNanos; protected SearchPhase(String name) { this.name = Objects.requireNonNull(name, "name must not be null"); } + public long getStartTimeInNanos() { + return startTimeInNanos; + } + + public void recordAndRun() throws IOException { + this.startTimeInNanos = System.nanoTime(); + run(); + } + /** * Returns the phase's name.
*/ diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java index 4ffd5521793f6..45d39a6f85ea2 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseContext.java @@ -73,6 +73,8 @@ public interface SearchPhaseContext extends Executor { */ SearchRequest getRequest(); + SearchPhase getCurrentPhase(); + /** * Builds and sends the final search response back to the user. * diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java index cca85f92d2676..161a103cdf36a 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java @@ -162,7 +162,7 @@ public AggregatedDfs aggregateDfs(Collection<DfsSearchResult> results) { * Returns a score doc array of top N search docs across all shards, followed by top suggest docs for each * named completion suggestion across all shards. If more than one named completion suggestion is specified in the * request, the suggest docs for a named suggestion are ordered by the suggestion name. - * + * <p> * Note: The order of the sorted score docs depends on the shard index in the result array if the merge process needs to disambiguate * the result. In order to obtain stable results the shard index (index of the result in the result array) must be the same. * @@ -284,7 +284,7 @@ public List<Integer>[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDocs) { /** * Enriches search hits and completion suggestion hits from sortedDocs using fetchResultsArr, * merges suggestions, aggregations and profile results - * + * <p> * Expects sortedDocs to have top search docs across all shards, optionally followed by top suggest docs for each named * completion suggestion ordered by suggestion name */ diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java index b6f842cf2cce1..4c0fe3ac06326 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseName.java @@ -13,6 +13,7 @@ * @opensearch.internal */ public enum SearchPhaseName { + DFS_PRE_QUERY("dfs_pre_query"), QUERY("query"), FETCH("fetch"), DFS_QUERY("dfs_query"), diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java new file mode 100644 index 0000000000000..8fe1be610f9af --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; +import org.opensearch.index.query.QueryShapeVisitor; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.sort.SortBuilder; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.tags.Tags; + +import java.util.List; +import java.util.ListIterator; + +/** + * Class to categorize the search queries based on the type and increment the relevant counters. + * Class also logs the query shape.
+ */ +final class SearchQueryCategorizer { + + private static final Logger log = LogManager.getLogger(SearchQueryCategorizer.class); + + final SearchQueryCounters searchQueryCounters; + + public SearchQueryCategorizer(MetricsRegistry metricsRegistry) { + searchQueryCounters = new SearchQueryCounters(metricsRegistry); + } + + public void categorize(SearchSourceBuilder source) { + QueryBuilder topLevelQueryBuilder = source.query(); + + logQueryShape(topLevelQueryBuilder); + incrementQueryTypeCounters(topLevelQueryBuilder); + incrementQueryAggregationCounters(source.aggregations()); + incrementQuerySortCounters(source.sorts()); + } + + private void incrementQuerySortCounters(List<SortBuilder<?>> sorts) { + if (sorts != null && sorts.size() > 0) { + for (ListIterator<SortBuilder<?>> it = sorts.listIterator(); it.hasNext();) { + SortBuilder<?> sortBuilder = it.next(); + String sortOrder = sortBuilder.order().toString(); + searchQueryCounters.sortCounter.add(1, Tags.create().addTag("sort_order", sortOrder)); + } + } + } + + private void incrementQueryAggregationCounters(AggregatorFactories.Builder aggregations) { + if (aggregations != null) { + searchQueryCounters.aggCounter.add(1); + } + } + + private void incrementQueryTypeCounters(QueryBuilder topLevelQueryBuilder) { + if (topLevelQueryBuilder == null) { + return; + } + QueryBuilderVisitor searchQueryVisitor = new SearchQueryCategorizingVisitor(searchQueryCounters); + topLevelQueryBuilder.visit(searchQueryVisitor); + } + + private void logQueryShape(QueryBuilder topLevelQueryBuilder) { + if (topLevelQueryBuilder == null) { + return; + } + QueryShapeVisitor shapeVisitor = new QueryShapeVisitor(); + topLevelQueryBuilder.visit(shapeVisitor); + log.trace("Query shape : {}", shapeVisitor.prettyPrintTree(" ")); + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java new file mode 100644 index 0000000000000..98f0169e69a5c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.lucene.search.BooleanClause; +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.MatchPhraseQueryBuilder; +import org.opensearch.index.query.MatchQueryBuilder; +import org.opensearch.index.query.MultiMatchQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; +import org.opensearch.index.query.QueryStringQueryBuilder; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.index.query.RegexpQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.WildcardQueryBuilder; +import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; +import org.opensearch.telemetry.metrics.tags.Tags; + +/** + * Class to visit the querybuilder tree and also track the level information. + * Increments the counters related to Search Query type.
+ */ +final class SearchQueryCategorizingVisitor implements QueryBuilderVisitor { + private static final String LEVEL_TAG = "level"; + private final int level; + private final SearchQueryCounters searchQueryCounters; + + public SearchQueryCategorizingVisitor(SearchQueryCounters searchQueryCounters) { + this(searchQueryCounters, 0); + } + + private SearchQueryCategorizingVisitor(SearchQueryCounters counters, int level) { + this.searchQueryCounters = counters; + this.level = level; + } + + public void accept(QueryBuilder qb) { + if (qb instanceof BoolQueryBuilder) { + searchQueryCounters.boolCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof FunctionScoreQueryBuilder) { + searchQueryCounters.functionScoreCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof MatchQueryBuilder) { + searchQueryCounters.matchCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof MatchPhraseQueryBuilder) { + searchQueryCounters.matchPhrasePrefixCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof MultiMatchQueryBuilder) { + searchQueryCounters.multiMatchCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof QueryStringQueryBuilder) { + searchQueryCounters.queryStringQueryCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof RangeQueryBuilder) { + searchQueryCounters.rangeCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof RegexpQueryBuilder) { + searchQueryCounters.regexCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof TermQueryBuilder) { + searchQueryCounters.termCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else if (qb instanceof WildcardQueryBuilder) { + searchQueryCounters.wildcardCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else { + searchQueryCounters.otherQueryCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } + } + + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + return new SearchQueryCategorizingVisitor(searchQueryCounters, level + 1); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java new file mode 100644 index 0000000000000..7e0259af07701 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java @@ -0,0 +1,117 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.MetricsRegistry; + +/** + * Class contains all the Counters related to search query types. 
+ */ +final class SearchQueryCounters { + private static final String UNIT = "1"; + private final MetricsRegistry metricsRegistry; + + // Counters related to Query types + public final Counter aggCounter; + public final Counter boolCounter; + public final Counter functionScoreCounter; + public final Counter matchCounter; + public final Counter matchPhrasePrefixCounter; + public final Counter multiMatchCounter; + public final Counter otherQueryCounter; + public final Counter queryStringQueryCounter; + public final Counter rangeCounter; + public final Counter regexCounter; + + public final Counter sortCounter; + public final Counter skippedCounter; + public final Counter termCounter; + public final Counter totalCounter; + public final Counter wildcardCounter; + + public SearchQueryCounters(MetricsRegistry metricsRegistry) { + this.metricsRegistry = metricsRegistry; + this.aggCounter = metricsRegistry.createCounter( + "search.query.type.agg.count", + "Counter for the number of top level agg search queries", + UNIT + ); + this.boolCounter = metricsRegistry.createCounter( + "search.query.type.bool.count", + "Counter for the number of top level and nested bool search queries", + UNIT + ); + this.functionScoreCounter = metricsRegistry.createCounter( + "search.query.type.functionscore.count", + "Counter for the number of top level and nested function score search queries", + UNIT + ); + this.matchCounter = metricsRegistry.createCounter( + "search.query.type.match.count", + "Counter for the number of top level and nested match search queries", + UNIT + ); + this.matchPhrasePrefixCounter = metricsRegistry.createCounter( + "search.query.type.matchphrase.count", + "Counter for the number of top level and nested match phrase prefix search queries", + UNIT + ); + this.multiMatchCounter = metricsRegistry.createCounter( + "search.query.type.multimatch.count", + "Counter for the number of top level and nested multi match search queries", + UNIT + ); + this.otherQueryCounter = metricsRegistry.createCounter( + "search.query.type.other.count", + "Counter for the number of top level and nested search queries that do not match any other categories", + UNIT + ); + this.queryStringQueryCounter = metricsRegistry.createCounter( + "search.query.type.querystringquery.count", + "Counter for the number of top level and nested queryStringQuery search queries", + UNIT + ); + this.rangeCounter = metricsRegistry.createCounter( + "search.query.type.range.count", + "Counter for the number of top level and nested range search queries", + UNIT + ); + this.regexCounter = metricsRegistry.createCounter( + "search.query.type.regex.count", + "Counter for the number of top level and nested regex search queries", + UNIT + ); + this.skippedCounter = metricsRegistry.createCounter( + "search.query.type.skipped.count", + "Counter for the number queries skipped due to error", + UNIT + ); + this.sortCounter = metricsRegistry.createCounter( + "search.query.type.sort.count", + "Counter for the number of top level sort search queries", + UNIT + ); + this.termCounter = metricsRegistry.createCounter( + "search.query.type.term.count", + "Counter for the number of top level and nested term search queries", + UNIT + ); + this.totalCounter = metricsRegistry.createCounter( + "search.query.type.total.count", + "Counter for the number of top level and nested search queries", + UNIT + ); + this.wildcardCounter = metricsRegistry.createCounter( + "search.query.type.wildcard.count", + "Counter for the number of top level and nested wildcard search 
queries", + UNIT + ); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java index f75ab2554e693..ca5ad087d3089 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -81,10 +81,11 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction * When unspecified, the pre-filter phase is executed if any of these conditions is met: *

              *
            • The request targets more than 128 shards
@@ -634,13 +643,27 @@ public void setPreFilterShardSize(int preFilterShardSize) { this.preFilterShardSize = preFilterShardSize; } + /** + * Returns value of user-provided phase_took query parameter for this search request. + */ + public Boolean isPhaseTook() { + return phaseTook; + } + + /** + * Sets value of phase_took query param if provided by user. Defaults to null. + */ + public void setPhaseTook(Boolean phaseTook) { + this.phaseTook = phaseTook; + } + /** * Returns a threshold that enforces a pre-filter roundtrip to pre-filter search shards based on query rewriting if the number of shards * the search request expands to exceeds the threshold, or null if the threshold is unspecified. * This filter roundtrip can limit the number of shards significantly if for * instance a shard can not match any documents based on its rewrite method i.e. if date filters are mandatory to match but the shard * bounds and the query are disjoint. - * + * <p> * When unspecified, the pre-filter phase is executed if any of these conditions is met: * <ul> * <li>The request targets more than 128 shards</li> @@ -738,7 +761,8 @@ public boolean equals(Object o) { && absoluteStartMillis == that.absoluteStartMillis && ccsMinimizeRoundtrips == that.ccsMinimizeRoundtrips && Objects.equals(cancelAfterTimeInterval, that.cancelAfterTimeInterval) - && Objects.equals(pipeline, that.pipeline); + && Objects.equals(pipeline, that.pipeline) + && Objects.equals(phaseTook, that.phaseTook); } @Override @@ -759,7 +783,8 @@ public int hashCode() { localClusterAlias, absoluteStartMillis, ccsMinimizeRoundtrips, - cancelAfterTimeInterval + cancelAfterTimeInterval, + phaseTook ); } @@ -802,6 +827,8 @@ public String toString() { + cancelAfterTimeInterval + ", pipeline=" + pipeline + + ", phaseTook=" + + phaseTook + "}"; } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java index 861e1df0203d7..bc43b65e5d844 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestBuilder.java @@ -605,7 +605,7 @@ public SearchRequestBuilder setMaxConcurrentShardRequests(int maxConcurrentShard * the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for * instance a shard can not match any documents based on its rewrite method i.e. if date filters are mandatory to match but the shard * bounds and the query are disjoint. - * + * <p>
* When unspecified, the pre-filter phase is executed if any of these conditions is met: * <ul> * <li>The request targets more than 128 shards</li> diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java new file mode 100644 index 0000000000000..89d725b56bded --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; + +import java.util.List; + +/** + * A listener for search, fetch and context events at the coordinator node level + * + * @opensearch.internal + */ +public interface SearchRequestOperationsListener { + + void onPhaseStart(SearchPhaseContext context); + + void onPhaseEnd(SearchPhaseContext context); + + void onPhaseFailure(SearchPhaseContext context); + + /** + * Holder of Composite Listeners + * + * @opensearch.internal + */ + + final class CompositeListener implements SearchRequestOperationsListener { + private final List<SearchRequestOperationsListener> listeners; + private final Logger logger; + + public CompositeListener(List<SearchRequestOperationsListener> listeners, Logger logger) { + this.listeners = listeners; + this.logger = logger; + } + + @Override + public void onPhaseStart(SearchPhaseContext context) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onPhaseStart(context); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onPhaseStart listener [{}] failed", listener), e); + } + } + } + + @Override + public void onPhaseEnd(SearchPhaseContext context) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onPhaseEnd(context); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onPhaseEnd listener [{}] failed", listener), e); + } + } + } + + @Override + public void onPhaseFailure(SearchPhaseContext context) { + for (SearchRequestOperationsListener listener : listeners) { + try { + listener.onPhaseFailure(context); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onPhaseFailure listener [{}] failed", listener), e); + } + } + } + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java new file mode 100644 index 0000000000000..ad299c11b987d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.action.search; + +import org.opensearch.common.inject.Inject; +import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.metrics.MeanMetric; + +import java.util.EnumMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * Request level search stats to track coordinator level node search latencies + * + * @opensearch.internal + */ +public final class SearchRequestStats implements SearchRequestOperationsListener { + Map<SearchPhaseName, StatsHolder> phaseStatsMap = new EnumMap<>(SearchPhaseName.class); + + @Inject + public SearchRequestStats() { + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + phaseStatsMap.put(searchPhaseName, new StatsHolder()); + } + } + + public long getPhaseCurrent(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName).current.count(); + } + + public long getPhaseTotal(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName).total.count(); + } + + public long getPhaseMetric(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName).timing.sum(); + } + + @Override + public void onPhaseStart(SearchPhaseContext context) { + phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc(); + } + + @Override + public void onPhaseEnd(SearchPhaseContext context) { + StatsHolder phaseStats = phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()); + phaseStats.current.dec(); + phaseStats.total.inc(); + phaseStats.timing.inc(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - context.getCurrentPhase().getStartTimeInNanos())); + } + + @Override + public void onPhaseFailure(SearchPhaseContext context) { + phaseStatsMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); + } + + /** + * Holder of statistics values + * + * @opensearch.internal + */ + + public static final class StatsHolder { + CounterMetric current = new CounterMetric(); + CounterMetric total = new CounterMetric(); + MeanMetric timing = new MeanMetric(); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponse.java b/server/src/main/java/org/opensearch/action/search/SearchResponse.java index c644604cffd25..63c99b7d026b8 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponse.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.TotalHits; import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.StatusToXContentObject; @@ -47,9 +48,12 @@ import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.Token; import org.opensearch.rest.action.RestActions; +import org.opensearch.search.GenericSearchExtBuilder; +import org.opensearch.search.SearchExtBuilder; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.Aggregations; @@ -61,11 +65,14 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.function.Supplier; +import
static org.opensearch.action.search.SearchResponseSections.EXT_FIELD; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; /** @@ -81,6 +88,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb private static final ParseField TIMED_OUT = new ParseField("timed_out"); private static final ParseField TERMINATED_EARLY = new ParseField("terminated_early"); private static final ParseField NUM_REDUCE_PHASES = new ParseField("num_reduce_phases"); + private static final ParseField EXT = new ParseField("ext"); private final SearchResponseSections internalResponse; private final String scrollId; @@ -91,6 +99,9 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb private final ShardSearchFailure[] shardFailures; private final Clusters clusters; private final long tookInMillis; + private final PhaseTook phaseTook; + + private List<SearchExtBuilder> searchExtBuilders = new ArrayList<>(); public SearchResponse(StreamInput in) throws IOException { super(in); @@ -109,6 +120,11 @@ public SearchResponse(StreamInput in) throws IOException { clusters = new Clusters(in); scrollId = in.readOptionalString(); tookInMillis = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + phaseTook = in.readOptionalWriteable(PhaseTook::new); + } else { + phaseTook = null; + } skippedShards = in.readVInt(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { pointInTimeId = in.readOptionalString(); @@ -127,7 +143,32 @@ public SearchResponse( ShardSearchFailure[] shardFailures, Clusters clusters ) { - this(internalResponse, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, shardFailures, clusters, null); + this(internalResponse, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, null, shardFailures, clusters, null); + } + + public SearchResponse( + SearchResponseSections internalResponse, + String scrollId, + int totalShards, + int successfulShards, + int skippedShards, + long tookInMillis, + ShardSearchFailure[] shardFailures, + Clusters clusters, + String pointInTimeId + ) { + this( + internalResponse, + scrollId, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + null, + shardFailures, + clusters, + pointInTimeId + ); } public SearchResponse( @@ -137,6 +178,7 @@ public SearchResponse( int successfulShards, int skippedShards, long tookInMillis, + PhaseTook phaseTook, ShardSearchFailure[] shardFailures, Clusters clusters, String pointInTimeId @@ -149,6 +191,7 @@ public SearchResponse( this.successfulShards = successfulShards; this.skippedShards = skippedShards; this.tookInMillis = tookInMillis; + this.phaseTook = phaseTook; this.shardFailures = shardFailures; assert skippedShards <= totalShards : "skipped: " + skippedShards + " total: " + totalShards; assert scrollId == null || pointInTimeId == null : "SearchResponse can't have both scrollId [" @@ -211,6 +254,13 @@ public TimeValue getTook() { return new TimeValue(tookInMillis); } + /** + * How long the request took in each search phase. + */ + public PhaseTook getPhaseTook() { + return phaseTook; + } + /** * The total number of shards the search was executed on.
*/ @@ -299,6 +349,9 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t builder.field(POINT_IN_TIME_ID.getPreferredName(), pointInTimeId); } builder.field(TOOK.getPreferredName(), tookInMillis); + if (phaseTook != null) { + phaseTook.toXContent(builder, params); + } builder.field(TIMED_OUT.getPreferredName(), isTimedOut()); if (isTerminatedEarly() != null) { builder.field(TERMINATED_EARLY.getPreferredName(), isTerminatedEarly()); @@ -317,6 +370,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t ); clusters.toXContent(builder, params); internalResponse.toXContent(builder, params); + return builder; } @@ -337,6 +391,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE Boolean terminatedEarly = null; int numReducePhases = 1; long tookInMillis = -1; + PhaseTook phaseTook = null; int successfulShards = -1; int totalShards = -1; int skippedShards = 0; // 0 for BWC @@ -344,6 +399,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE String searchContextId = null; List<ShardSearchFailure> failures = new ArrayList<>(); Clusters clusters = Clusters.EMPTY; + List<SearchExtBuilder> extBuilders = new ArrayList<>(); for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) { if (token == Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -400,6 +456,24 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE parser.skipChildren(); } } + } else if (PhaseTook.PHASE_TOOK.match(currentFieldName, parser.getDeprecationHandler())) { + Map<String, Long> phaseTookMap = new HashMap<>(); + + while ((token = parser.nextToken()) != Token.END_OBJECT) { + if (token == Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + try { + SearchPhaseName.valueOf(currentFieldName.toUpperCase(Locale.ROOT)); + phaseTookMap.put(currentFieldName, parser.longValue()); + } catch (final IllegalArgumentException ex) { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + phaseTook = new PhaseTook(phaseTookMap); } else if (Clusters._CLUSTERS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { int successful = -1; int total = -1; @@ -422,6 +496,33 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE } } clusters = new Clusters(total, successful, skipped); + } else if (EXT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + String extSectionName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + extSectionName = parser.currentName(); + } else { + SearchExtBuilder searchExtBuilder; + try { + searchExtBuilder = parser.namedObject(SearchExtBuilder.class, extSectionName, null); + if (!searchExtBuilder.getWriteableName().equals(extSectionName)) { + throw new IllegalStateException( + "The parsed [" + + searchExtBuilder.getClass().getName() + + "] object has a " + + "different writeable name compared to the name of the section that it was parsed from: found [" + + searchExtBuilder.getWriteableName() + + "] expected [" + + extSectionName + + "]" + ); + } + } catch (XContentParseException e) { + searchExtBuilder = GenericSearchExtBuilder.fromXContent(parser); + } + extBuilders.add(searchExtBuilder); + } + } } else { parser.skipChildren(); } @@ -434,7 +535,8 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE timedOut, terminatedEarly, profile, -
numReducePhases + numReducePhases, + extBuilders ); return new SearchResponse( searchResponseSections, @@ -443,6 +545,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE successfulShards, skippedShards, tookInMillis, + phaseTook, failures.toArray(ShardSearchFailure.EMPTY_ARRAY), clusters, searchContextId @@ -462,6 +565,9 @@ public void writeTo(StreamOutput out) throws IOException { clusters.writeTo(out); out.writeOptionalString(scrollId); out.writeVLong(tookInMillis); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(phaseTook); + } out.writeVInt(skippedShards); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { out.writeOptionalString(pointInTimeId); @@ -473,6 +579,10 @@ public String toString() { return Strings.toString(MediaTypeRegistry.JSON, this); } + public void addSearchExtBuilder(SearchExtBuilder searchExtBuilder) { + this.searchExtBuilders.add(searchExtBuilder); + } + /** * Holds info about the clusters that the search was executed on: how many in total, how many of them were successful * and how many of them were skipped. @@ -577,6 +687,67 @@ public String toString() { } } + /** + * Holds info about how long the request took in each search phase. + * + * @opensearch.internal + */ + public static class PhaseTook implements ToXContentFragment, Writeable { + static final ParseField PHASE_TOOK = new ParseField("phase_took"); + private final Map<String, Long> phaseTookMap; + + public PhaseTook(Map<String, Long> phaseTookMap) { + this.phaseTookMap = phaseTookMap; + } + + private PhaseTook(StreamInput in) throws IOException { + this(in.readMap(StreamInput::readString, StreamInput::readLong)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(phaseTookMap, StreamOutput::writeString, StreamOutput::writeLong); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(PHASE_TOOK.getPreferredName()); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + if (phaseTookMap.containsKey(searchPhaseName.getName())) { + builder.field(searchPhaseName.getName(), phaseTookMap.get(searchPhaseName.getName())); + } else { + builder.field(searchPhaseName.getName(), 0); + } + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + PhaseTook phaseTook = (PhaseTook) o; + + return phaseTook.phaseTookMap.equals(phaseTookMap); + } + + @Override + public int hashCode() { + return Objects.hash(phaseTookMap); + } + } + static SearchResponse empty(Supplier<Long> tookInMillisSupplier, Clusters clusters) { SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), Float.NaN); InternalSearchResponse internalSearchResponse = new InternalSearchResponse( diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java index f90e98106f93f..054bd578cc56c 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java @@ -236,6 +236,7 @@ SearchResponse getMergedResponse(SearchResponse.Clusters clusters) {
successfulShards, skippedShards, tookInMillis, + searchTimeProvider.getPhaseTook(), shardFailures, clusters, null diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java b/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java index 214bc0448b90c..579a01f0dd932 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java @@ -32,9 +32,11 @@ package org.opensearch.action.search; +import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.search.SearchExtBuilder; import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.Aggregations; import org.opensearch.search.profile.ProfileShardResult; @@ -42,13 +44,16 @@ import org.opensearch.search.suggest.Suggest; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; +import java.util.List; import java.util.Map; +import java.util.Objects; /** * Base class that holds the various sections which a search response is * composed of (hits, aggs, suggestions etc.) and allows to retrieve them. - * + * <p>
* The reason why this class exists is that the high level REST client uses its own classes * to parse aggregations into, which are not serializable. This is the common part that can be * shared between core and client. @@ -57,6 +62,8 @@ */ public class SearchResponseSections implements ToXContentFragment { + public static final ParseField EXT_FIELD = new ParseField("ext"); + protected final SearchHits hits; protected final Aggregations aggregations; protected final Suggest suggest; @@ -64,6 +71,7 @@ public class SearchResponseSections implements ToXContentFragment { protected final boolean timedOut; protected final Boolean terminatedEarly; protected final int numReducePhases; + protected final List<SearchExtBuilder> searchExtBuilders = new ArrayList<>(); public SearchResponseSections( SearchHits hits, @@ -73,6 +81,19 @@ public SearchResponseSections( Boolean terminatedEarly, SearchProfileShardResults profileResults, int numReducePhases + ) { + this(hits, aggregations, suggest, timedOut, terminatedEarly, profileResults, numReducePhases, Collections.emptyList()); + } + + public SearchResponseSections( + SearchHits hits, + Aggregations aggregations, + Suggest suggest, + boolean timedOut, + Boolean terminatedEarly, + SearchProfileShardResults profileResults, + int numReducePhases, + List<SearchExtBuilder> searchExtBuilders ) { this.hits = hits; this.aggregations = aggregations; @@ -81,6 +102,7 @@ public SearchResponseSections( this.timedOut = timedOut; this.terminatedEarly = terminatedEarly; this.numReducePhases = numReducePhases; + this.searchExtBuilders.addAll(Objects.requireNonNull(searchExtBuilders, "searchExtBuilders must not be null")); } public final boolean timedOut() { @@ -135,9 +157,20 @@ public final XContentBuilder toXContent(XContentBuilder builder, Params params) if (profileResults != null) { profileResults.toXContent(builder, params); } + if (!searchExtBuilders.isEmpty()) { + builder.startObject(EXT_FIELD.getPreferredName()); + for (SearchExtBuilder searchExtBuilder : searchExtBuilders) { + searchExtBuilder.toXContent(builder, params); + } + builder.endObject(); + } return builder; } + public List<SearchExtBuilder> getSearchExtBuilders() { + return Collections.unmodifiableList(this.searchExtBuilders); + } + protected void writeTo(StreamOutput out) throws IOException { throw new UnsupportedOperationException(); } diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java index 614d576324026..b15a4b66e8870 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java @@ -76,7 +76,7 @@ private void deletePits(ActionListener<DeletePitResponse> listener, DeletePitReq /** * Delete all active PIT reader contexts leveraging list all PITs - * + * <p>
* For Cross cluster PITs : * - mixed cluster PITs ( PIT comprising local and remote ) will be fully deleted. Since there will at least be * one reader context with PIT ID present in local cluster, 'Get all PITs' will retrieve the PIT ID with which diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 25ec0fc57d19f..16b7e4810b130 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -67,6 +67,7 @@ import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.indices.breaker.CircuitBreakerService; @@ -87,6 +88,7 @@ import org.opensearch.search.profile.SearchProfileShardResults; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.metrics.MetricsRegistry; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.RemoteClusterAware; import org.opensearch.transport.RemoteClusterService; @@ -97,6 +99,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -135,6 +138,13 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> { + public static final Setting<Boolean> SEARCH_QUERY_METRICS_ENABLED_SETTING = Setting.boolSetting( + "search.query.metrics.enabled", + false, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + // cluster level setting for timeout based search cancellation.
If search request level parameter is present then that will take // precedence over the cluster setting value public static final String SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING_KEY = "search.cancel_after_time_interval"; @@ -145,6 +155,22 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> + public static final String SEARCH_REQUEST_STATS_ENABLED_KEY = "search.request_stats_enabled"; + public static final Setting<Boolean> SEARCH_REQUEST_STATS_ENABLED = Setting.boolSetting( + SEARCH_REQUEST_STATS_ENABLED_KEY, + false, + Property.Dynamic, + Property.NodeScope + ); + + public static final String SEARCH_PHASE_TOOK_ENABLED_KEY = "search.phase_took_enabled"; + public static final Setting<Boolean> SEARCH_PHASE_TOOK_ENABLED = Setting.boolSetting( + SEARCH_PHASE_TOOK_ENABLED_KEY, + false, + Property.Dynamic, + Property.NodeScope + ); + private final NodeClient client; private final ThreadPool threadPool; private final ClusterService clusterService; @@ -157,6 +183,16 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> (Writeable.Reader<SearchRequest>) SearchRequest::new); this.client = client; @@ -185,6 +223,24 @@ public TransportSearchAction( this.indexNameExpressionResolver = indexNameExpressionResolver; this.namedWriteableRegistry = namedWriteableRegistry; this.searchPipelineService = searchPipelineService; + this.isRequestStatsEnabled = clusterService.getClusterSettings().get(SEARCH_REQUEST_STATS_ENABLED); + clusterService.getClusterSettings().addSettingsUpdateConsumer(SEARCH_REQUEST_STATS_ENABLED, this::setIsRequestStatsEnabled); + this.searchRequestStats = searchRequestStats; + this.metricsRegistry = metricsRegistry; + this.searchQueryMetricsEnabled = clusterService.getClusterSettings().get(SEARCH_QUERY_METRICS_ENABLED_SETTING); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(SEARCH_QUERY_METRICS_ENABLED_SETTING, this::setSearchQueryMetricsEnabled); + } + + private void setSearchQueryMetricsEnabled(boolean searchQueryMetricsEnabled) { + this.searchQueryMetricsEnabled = searchQueryMetricsEnabled; + if ((this.searchQueryMetricsEnabled == true) && this.searchQueryCategorizer == null) { + this.searchQueryCategorizer = new SearchQueryCategorizer(metricsRegistry); + } + } + + private void setIsRequestStatsEnabled(boolean isRequestStatsEnabled) { + this.isRequestStatsEnabled = isRequestStatsEnabled; } private Map<String, AliasFilter> buildPerIndexAliasFilter( @@ -231,6 +287,8 @@ private Map<String, Float> resolveIndexBoosts(SearchRequest searchRequest, Clust } /** + * Listener to track request-level tookTime and phase tookTimes from the coordinator. + * * Search operations need two clocks. One clock is to fulfill real clock needs (e.g., resolving * "now" to an index name). Another clock is needed for measuring how long a search operation * took. These two uses are at odds with each other. There are many issues with using a real @@ -240,11 +298,12 @@ private Map<String, Float> resolveIndexBoosts(SearchRequest searchRequest, Clust * * @opensearch.internal */ - static final class SearchTimeProvider { + static final class SearchTimeProvider implements SearchRequestOperationsListener { private final long absoluteStartMillis; private final long relativeStartNanos; private final LongSupplier relativeCurrentNanosProvider; + private boolean phaseTook = false; /** * Instantiates a new search time provider.
The absolute start time is the real clock time @@ -270,6 +329,47 @@ long getAbsoluteStartMillis() { long buildTookInMillis() { return TimeUnit.NANOSECONDS.toMillis(relativeCurrentNanosProvider.getAsLong() - relativeStartNanos); } + + public void setPhaseTook(boolean phaseTook) { + this.phaseTook = phaseTook; + } + + public boolean isPhaseTook() { + return phaseTook; + } + + SearchResponse.PhaseTook getPhaseTook() { + if (phaseTook) { + Map<String, Long> phaseTookMap = new HashMap<>(); + // Convert Map<SearchPhaseName, Long> to Map<String, Long> for SearchResponse() + for (SearchPhaseName searchPhaseName : phaseStatsMap.keySet()) { + phaseTookMap.put(searchPhaseName.getName(), phaseStatsMap.get(searchPhaseName)); + } + return new SearchResponse.PhaseTook(phaseTookMap); + } else { + return null; + } + } + + Map<SearchPhaseName, Long> phaseStatsMap = new EnumMap<>(SearchPhaseName.class); + + @Override + public void onPhaseStart(SearchPhaseContext context) {} + + @Override + public void onPhaseEnd(SearchPhaseContext context) { + phaseStatsMap.put( + context.getCurrentPhase().getSearchPhaseName(), + TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - context.getCurrentPhase().getStartTimeInNanos()) + ); + } + + @Override + public void onPhaseFailure(SearchPhaseContext context) {} + + public Long getPhaseTookTime(SearchPhaseName searchPhaseName) { + return phaseStatsMap.get(searchPhaseName); + } } @Override @@ -327,7 +427,8 @@ public AbstractSearchAsyncAction<SearchPhaseResult> asyncSearchAction( ActionListener<SearchResponse> listener, boolean preFilter, ThreadPool threadPool, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ) { return new AbstractSearchAsyncAction<SearchPhaseResult>( actionName, @@ -346,7 +447,8 @@ public AbstractSearchAsyncAction<SearchPhaseResult> asyncSearchAction( task, new ArraySearchPhaseResults<>(shardsIts.size()), searchRequest.getMaxConcurrentShardRequests(), - clusters + clusters, + searchRequestOperationsListener ) { @Override protected void executePhaseOnShard( @@ -390,20 +492,65 @@ private void executeRequest( relativeStartNanos, System::nanoTime ); + + final List<SearchRequestOperationsListener> searchListenersList = createSearchListenerList(originalSearchRequest, timeProvider); + + final SearchRequestOperationsListener searchRequestOperationsListener; + if (!CollectionUtils.isEmpty(searchListenersList)) { + searchRequestOperationsListener = new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); + } else { + searchRequestOperationsListener = null; + } + PipelinedRequest searchRequest; ActionListener<SearchResponse> listener; try { searchRequest = searchPipelineService.resolvePipeline(originalSearchRequest); - listener = ActionListener.wrap( - r -> originalListener.onResponse(searchRequest.transformResponse(r)), - originalListener::onFailure - ); + listener = searchRequest.transformResponseListener(originalListener); } catch (Exception e) { originalListener.onFailure(e); return; } - ActionListener<SearchSourceBuilder> rewriteListener = ActionListener.wrap(source -> { + ActionListener<SearchRequest> requestTransformListener = ActionListener.wrap(sr -> { + if (searchQueryMetricsEnabled) { + try { + searchQueryCategorizer.categorize(sr.source()); + } catch (Exception e) { + logger.error("Error while trying to categorize the query.", e); + } + } + + ActionListener<SearchSourceBuilder> rewriteListener = buildRewriteListener( + sr, + task, + timeProvider, + searchAsyncActionProvider, + listener, + searchRequestOperationsListener + ); + if (sr.source() == null) { + rewriteListener.onResponse(sr.source()); + } else { + Rewriteable.rewriteAndFetch( + sr.source(), +
searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis), + rewriteListener + ); + } + }, listener::onFailure); + searchRequest.transformRequest(requestTransformListener); + } + + private ActionListener<SearchSourceBuilder> buildRewriteListener( + SearchRequest searchRequest, + Task task, + SearchTimeProvider timeProvider, + SearchAsyncActionProvider searchAsyncActionProvider, + ActionListener<SearchResponse> listener, + SearchRequestOperationsListener searchRequestOperationsListener + ) { + return ActionListener.wrap(source -> { if (source != searchRequest.source()) { // only set it if it changed - we don't allow null values to be set but it might be already null. this way we catch // situations when source is rewritten to null due to a bug @@ -433,7 +580,8 @@ private void executeRequest( clusterState, listener, searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestOperationsListener ); } else { if (shouldMinimizeRoundtrips(searchRequest)) { @@ -454,7 +602,8 @@ private void executeRequest( clusterState, l, searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestOperationsListener ) ); } else { @@ -504,22 +653,14 @@ private void executeRequest( listener, new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters.get()), searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestOperationsListener ); }, listener::onFailure) ); } } }, listener::onFailure); - if (searchRequest.source() == null) { - rewriteListener.onResponse(searchRequest.source()); - } else { - Rewriteable.rewriteAndFetch( - searchRequest.source(), - searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis), - rewriteListener - ); - } } static boolean shouldMinimizeRoundtrips(SearchRequest searchRequest) { @@ -593,6 +734,7 @@ public void onResponse(SearchResponse searchResponse) { searchResponse.getSuccessfulShards(), searchResponse.getSkippedShards(), timeProvider.buildTookInMillis(), + timeProvider.getPhaseTook(), searchResponse.getShardFailures(), new SearchResponse.Clusters(1, 1, 0), searchResponse.pointInTimeId() @@ -782,7 +924,8 @@ private void executeLocalSearch( ClusterState clusterState, ActionListener<SearchResponse> listener, SearchContextId searchContext, - SearchAsyncActionProvider searchAsyncActionProvider + SearchAsyncActionProvider searchAsyncActionProvider, + SearchRequestOperationsListener searchRequestOperationsListener ) { executeSearch( (SearchTask) task, @@ -796,7 +939,8 @@ private void executeLocalSearch( listener, SearchResponse.Clusters.EMPTY, searchContext, - searchAsyncActionProvider + searchAsyncActionProvider, + searchRequestOperationsListener ); } @@ -914,11 +1058,10 @@ private void executeSearch( ActionListener<SearchResponse> listener, SearchResponse.Clusters clusters, @Nullable SearchContextId searchContext, - SearchAsyncActionProvider searchAsyncActionProvider + SearchAsyncActionProvider searchAsyncActionProvider, + SearchRequestOperationsListener searchRequestOperationsListener ) { - clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); - // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name // date math expressions and $now in scripts.
This way all apis will deal with now in the same way instead // of just for the _search api @@ -968,11 +1111,8 @@ private void executeSearch( indexRoutings = routingMap; } final GroupShardsIterator<SearchShardIterator> shardIterators = mergeShardsIterators(localShardIterators, remoteShardIterators); - failIfOverShardCountLimit(clusterService, shardIterators.size()); - Map<String, Float> concreteIndexBoosts = resolveIndexBoosts(searchRequest, clusterState); - // optimize search type for cases where there is only one shard group to search on if (shardIterators.size() == 1) { // if we only have one group, then we always want Q_T_F, no need for DFS, and no need to do THEN since we hit one shard @@ -1020,7 +1160,8 @@ private void executeSearch( listener, preFilterSearchShards, threadPool, - clusters + clusters, + searchRequestOperationsListener ).start(); } @@ -1103,10 +1244,33 @@ AbstractSearchAsyncAction<? extends SearchPhaseResult> asyncSearchAction( ActionListener<SearchResponse> listener, boolean preFilter, ThreadPool threadPool, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ); } + private List<SearchRequestOperationsListener> createSearchListenerList(SearchRequest searchRequest, SearchTimeProvider timeProvider) { + final List<SearchRequestOperationsListener> searchListenersList = new ArrayList<>(); + + if (isRequestStatsEnabled) { + searchListenersList.add(searchRequestStats); + } + + // phase_took is enabled with request param and/or cluster setting + Boolean phaseTookRequestParam = searchRequest.isPhaseTook(); + if (phaseTookRequestParam == null) { // check cluster setting only when request param is undefined + if (clusterService.getClusterSettings().get(TransportSearchAction.SEARCH_PHASE_TOOK_ENABLED)) { + timeProvider.setPhaseTook(true); + searchListenersList.add(timeProvider); + } + } else if (phaseTookRequestParam == true) { + timeProvider.setPhaseTook(true); + searchListenersList.add(timeProvider); + } + + return searchListenersList; + } + private AbstractSearchAsyncAction<? extends SearchPhaseResult> searchAsyncAction( SearchTask task, SearchRequest searchRequest, @@ -1121,7 +1285,8 @@ private AbstractSearchAsyncAction<? extends SearchPhaseResult> searchAsyncAction ActionListener<SearchResponse> listener, boolean preFilter, ThreadPool threadPool, - SearchResponse.Clusters clusters + SearchResponse.Clusters clusters, + SearchRequestOperationsListener searchRequestOperationsListener ) { if (preFilter) { return new CanMatchPreFilterSearchPhase( @@ -1153,7 +1318,8 @@ private AbstractSearchAsyncAction<? extends SearchPhaseResult> searchAsyncAction listener, false, threadPool, - clusters + clusters, + searchRequestOperationsListener ); return new SearchPhase(action.getName()) { @Override @@ -1162,7 +1328,8 @@ public void run() { } }; }, - clusters + clusters, + searchRequestOperationsListener ); } else { final QueryPhaseResultConsumer queryResultConsumer = searchPhaseController.newSearchPhaseResults( @@ -1192,7 +1359,8 @@ public void run() { timeProvider, clusterState, task, - clusters + clusters, + searchRequestOperationsListener ); break; case QUERY_THEN_FETCH: @@ -1212,7 +1380,8 @@ public void run() { timeProvider, clusterState, task, - clusters + clusters, + searchRequestOperationsListener ); break; default: diff --git a/server/src/main/java/org/opensearch/action/support/TransportAction.java b/server/src/main/java/org/opensearch/action/support/TransportAction.java index daa11c2d7d80f..72aae210d61ae 100644 --- a/server/src/main/java/org/opensearch/action/support/TransportAction.java +++ b/server/src/main/java/org/opensearch/action/support/TransportAction.java @@ -81,7 +81,7 @@ private Releasable registerChildNode(TaskId parentTask) { /** *
Use this method when the transport action call should result in creation of a new task associated with the call. - * + * <p>
                  * This is a typical behavior. */ public final Task execute(Request request, ActionListener listener) { diff --git a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java index f5fb41dc5bae3..4d54ce51c923c 100644 --- a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java +++ b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodesRequest.java @@ -53,9 +53,9 @@ public abstract class BaseNodesRequest * the list of nodesIds that will be used to resolve this request and {@link #concreteNodes} * will be populated. Note that if {@link #concreteNodes} is not null, it will be used and nodeIds * will be ignored. - * + *
* <p>
                  * See {@link DiscoveryNodes#resolveNodes} for a full description of the options. - * + *
* <p>
                  * TODO: get rid of this and resolve it to concrete nodes in the rest layer **/ private String[] nodesIds; diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java index 60c490a50575a..9f69d41d83f5b 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java @@ -479,7 +479,7 @@ public interface Primary< /** * Notifies the primary of a local checkpoint for the given allocation. - * + *
* <p>
                  * Note: The primary will use this information to advance the global checkpoint if possible. * * @param allocationId allocation ID of the shard corresponding to the supplied local checkpoint diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index de5a92fdcc4b1..b68bd13cfed80 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -100,7 +100,7 @@ /** * Base class for requests that should be executed on a primary copy followed by replica copies. * Subclasses can resolve the target shard and provide implementation for primary and replica operations. - * + *
* <p>
                  * The action samples cluster state on the receiving node to reroute to node with primary copy and on the * primary node to validate request before primary operation followed by sampling state again for resolving * nodes with replica copies to perform replication. @@ -866,7 +866,7 @@ protected IndexShard getIndexShard(final ShardId shardId) { * Responsible for routing and retrying failed operations on the primary. * The actual primary operation is done in {@link ReplicationOperation} on the * node with primary copy. - * + *
* <p>
                  * Resolves index and shard id for the request before routing it to target node * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java index 62cbfbde9780a..9ebfa8cfd0df8 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java @@ -59,6 +59,11 @@ import org.opensearch.index.translog.Translog.Location; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -82,6 +87,7 @@ public abstract class TransportWriteAction< protected final SystemIndices systemIndices; private final Function executorFunction; + private final Tracer tracer; protected TransportWriteAction( Settings settings, @@ -97,7 +103,8 @@ protected TransportWriteAction( Function executorFunction, boolean forceExecutionOnPrimary, IndexingPressureService indexingPressureService, - SystemIndices systemIndices + SystemIndices systemIndices, + Tracer tracer ) { // We pass ThreadPool.Names.SAME to the super class as we control the dispatching to the // ThreadPool.Names.WRITE/ThreadPool.Names.SYSTEM_WRITE thread pools in this class. @@ -119,6 +126,7 @@ protected TransportWriteAction( this.executorFunction = executorFunction; this.indexingPressureService = indexingPressureService; this.systemIndices = systemIndices; + this.tracer = tracer; } protected String executor(IndexShard shard) { @@ -220,7 +228,12 @@ protected void shardOperationOnPrimary( threadPool.executor(executor).execute(new ActionRunnable>(listener) { @Override protected void doRun() { - dispatchedShardOperationOnPrimary(request, primary, listener); + Span span = tracer.startSpan( + SpanBuilder.from("dispatchedShardOperationOnPrimary", clusterService.localNode().getId(), request) + ); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + dispatchedShardOperationOnPrimary(request, primary, TraceableActionListener.create(listener, span, tracer)); + } } @Override @@ -248,7 +261,12 @@ protected void shardOperationOnReplica(ReplicaRequest request, IndexShard replic threadPool.executor(executorFunction.apply(replica)).execute(new ActionRunnable(listener) { @Override protected void doRun() { - dispatchedShardOperationOnReplica(request, replica, listener); + Span span = tracer.startSpan( + SpanBuilder.from("dispatchedShardOperationOnReplica", clusterService.localNode().getId(), request) + ); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + dispatchedShardOperationOnReplica(request, replica, TraceableActionListener.create(listener, span, tracer)); + } } @Override @@ -266,7 +284,7 @@ protected abstract void dispatchedShardOperationOnReplica( /** * Result of taking the action on the primary. - * + *
* <p>
                  * NOTE: public for testing * * @opensearch.internal @@ -496,7 +514,7 @@ void run() { * A proxy for write operations that need to be performed on the * replicas, where a failure to execute the operation should fail * the replica shard and/or mark the replica as stale. - * + *
* <p>
                  * This extends {@code TransportReplicationAction.ReplicasProxy} to do the * failing and stale-ing. * diff --git a/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java b/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java index c474096ff94e4..56b34aea8248d 100644 --- a/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java +++ b/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java @@ -55,7 +55,7 @@ public abstract class SingleShardRequest * Whether index property is optional depends on the concrete implementation. If index property is required the * concrete implementation should use {@link #validateNonNullIndex()} to check if the index property has been set */ diff --git a/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksRequest.java b/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksRequest.java index a10f74f7c2aa8..c2ae333b17055 100644 --- a/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksRequest.java +++ b/server/src/main/java/org/opensearch/action/support/tasks/BaseTasksRequest.java @@ -128,7 +128,7 @@ public final Request setNodes(String... nodes) { /** * Returns the id of the task that should be processed. - * + *
* <p>
                  * By default tasks with any ids are returned. */ public TaskId getTaskId() { diff --git a/server/src/main/java/org/opensearch/action/termvectors/TransportTermVectorsAction.java b/server/src/main/java/org/opensearch/action/termvectors/TransportTermVectorsAction.java index a76506b39f811..b7e8a29bd4027 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TransportTermVectorsAction.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TransportTermVectorsAction.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.routing.GroupShardsIterator; +import org.opensearch.cluster.routing.Preference; import org.opensearch.cluster.routing.ShardIterator; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -87,15 +88,24 @@ public TransportTermVectorsAction( @Override protected ShardIterator shards(ClusterState state, InternalRequest request) { + + String preference = request.request().preference; + // For a real time request on a seg rep index, use primary shard as the preferred query shard. + if (request.request().realtime() + && preference == null + && state.getMetadata().isSegmentReplicationEnabled(request.concreteIndex())) { + preference = Preference.PRIMARY.type(); + } + if (request.request().doc() != null && request.request().routing() == null) { // artificial document without routing specified, ignore its "id" and use either random shard or according to preference GroupShardsIterator groupShardsIter = clusterService.operationRouting() - .searchShards(state, new String[] { request.concreteIndex() }, null, request.request().preference()); + .searchShards(state, new String[] { request.concreteIndex() }, null, preference); return groupShardsIter.iterator().next(); } return clusterService.operationRouting() - .getShards(state, request.concreteIndex(), request.request().id(), request.request().routing(), request.request().preference()); + .getShards(state, request.concreteIndex(), request.request().id(), request.request().routing(), preference); } @Override diff --git a/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java index 95735f71a38e7..819112eb497f6 100644 --- a/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java +++ b/server/src/main/java/org/opensearch/action/update/TransportUpdateAction.java @@ -32,6 +32,7 @@ package org.opensearch.action.update; +import org.opensearch.ExceptionsHelper; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.action.ActionRunnable; import org.opensearch.action.DocWriteRequest; @@ -62,11 +63,13 @@ import org.opensearch.core.common.io.stream.NotSerializableExceptionWrapper; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaType; import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import org.opensearch.index.engine.VersionConflictEngineException; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; import org.opensearch.indices.IndicesService; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; @@ -154,10 +157,13 @@ public static void 
resolveAndValidateRouting(Metadata metadata, String concreteI @Override protected void doExecute(Task task, final UpdateRequest request, final ActionListener listener) { if (request.isRequireAlias() && (clusterService.state().getMetadata().hasAlias(request.index()) == false)) { - throw new IndexNotFoundException( + IndexNotFoundException e = new IndexNotFoundException( "[" + DocWriteRequest.REQUIRE_ALIAS + "] request flag is [true] and [" + request.index() + "] is not an alias", request.index() ); + + incDocStatusStats(e); + throw e; } // if we don't have a master, we don't have metadata, that's fine, let it find a cluster-manager using create index API if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) { @@ -193,7 +199,10 @@ public void onFailure(Exception e) { } private void innerExecute(final Task task, final UpdateRequest request, final ActionListener listener) { - super.doExecute(task, request, listener); + super.doExecute(task, request, ActionListener.wrap(listener::onResponse, e -> { + incDocStatusStats(e); + listener.onFailure(e); + })); } @Override @@ -330,7 +339,13 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< shard.noopUpdate(); } } + + DocStatusStats stats = new DocStatusStats(); + stats.inc(RestStatus.OK); + + indicesService.addDocStatusStats(stats); listener.onResponse(update); + break; default: throw new IllegalStateException("Illegal result " + result.getResponseResult()); @@ -361,4 +376,10 @@ private void handleUpdateFailureWithRetry( } listener.onFailure(cause instanceof Exception ? (Exception) cause : new NotSerializableExceptionWrapper(cause)); } + + private void incDocStatusStats(final Exception e) { + DocStatusStats stats = new DocStatusStats(); + stats.inc(ExceptionsHelper.status(e)); + indicesService.addDocStatusStats(stats); + } } diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java index ca38c0c4273c6..7db7934187d13 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java @@ -612,7 +612,7 @@ public long ifSeqNo() { /** * If set, only perform this update request if the document's last modification was assigned this primary term. - * +
* <p>
                  * If the document last modification was assigned a different term a * {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown. */ diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapCheck.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapCheck.java index 429612ba1b93d..a695486bd084c 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapCheck.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapCheck.java @@ -32,18 +32,22 @@ package org.opensearch.bootstrap; +import org.opensearch.common.annotation.PublicApi; + import java.util.Objects; /** * Encapsulates a bootstrap check. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface BootstrapCheck { /** * Encapsulate the result of a bootstrap check. */ + @PublicApi(since = "1.0.0") final class BootstrapCheckResult { private final String message; diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java index f9661e71d60e6..e43c42446de2c 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java @@ -73,7 +73,7 @@ /** * We enforce bootstrap checks once a node has the transport protocol bound to a non-loopback interface or if the system property {@code - * opensearch.enforce.bootstrap.checks} is set to {@true}. In this case we assume the node is running in production and + * opensearch.enforce.bootstrap.checks} is set to {@code true}. In this case we assume the node is running in production and * all bootstrap checks must pass. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapContext.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapContext.java index 1cfd8bf6dfc35..a7ffd701d07f0 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapContext.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapContext.java @@ -32,14 +32,16 @@ package org.opensearch.bootstrap; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; /** * Context that is passed to every bootstrap check to make decisions on. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class BootstrapContext { /** * The node's environment diff --git a/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java b/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java index 8e556df4b2f9b..91da34fb7216d 100644 --- a/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java +++ b/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java @@ -141,7 +141,7 @@ public boolean callback(long dwCtrlType) { /** * Memory protection constraints - * + *
* <p>
                  * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786%28v=vs.85%29.aspx */ public static final int PAGE_NOACCESS = 0x0001; @@ -151,7 +151,7 @@ public boolean callback(long dwCtrlType) { /** * Contains information about a range of pages in the virtual address space of a process. * The VirtualQuery and VirtualQueryEx functions use this structure. - * + *
* <p>
                  * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366775%28v=vs.85%29.aspx */ public static class MemoryBasicInformation extends Structure { @@ -186,7 +186,7 @@ public SizeT() { /** * Locks the specified region of the process's virtual address space into physical * memory, ensuring that subsequent access to the region will not incur a page fault. - * + *
* <p>
                  * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366895%28v=vs.85%29.aspx * * @param address A pointer to the base address of the region of pages to be locked. @@ -197,7 +197,7 @@ public SizeT() { /** * Retrieves information about a range of pages within the virtual address space of a specified process. - * + *
* <p>
                  * https://msdn.microsoft.com/en-us/library/windows/desktop/aa366907%28v=vs.85%29.aspx * * @param handle A handle to the process whose memory information is queried. @@ -210,7 +210,7 @@ public SizeT() { /** * Sets the minimum and maximum working set sizes for the specified process. - * + *
* <p>
                  * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686234%28v=vs.85%29.aspx * * @param handle A handle to the process whose working set sizes is to be set. @@ -222,7 +222,7 @@ public SizeT() { /** * Retrieves a pseudo handle for the current process. - * + *
* <p>
                  * https://msdn.microsoft.com/en-us/library/windows/desktop/ms683179%28v=vs.85%29.aspx * * @return a pseudo handle to the current process. @@ -231,7 +231,7 @@ public SizeT() { /** * Closes an open object handle. - * + *
* <p>
                  * https://msdn.microsoft.com/en-us/library/windows/desktop/ms724211%28v=vs.85%29.aspx * * @param handle A valid handle to an open object. @@ -252,7 +252,7 @@ public SizeT() { /** * Creates or opens a new job object - * + *
* <p>
                  * https://msdn.microsoft.com/en-us/library/windows/desktop/ms682409%28v=vs.85%29.aspx * * @param jobAttributes security attributes @@ -263,7 +263,7 @@ public SizeT() { /** * Associates a process with an existing job - * + *
* <p>
                  * https://msdn.microsoft.com/en-us/library/windows/desktop/ms681949%28v=vs.85%29.aspx * * @param job job handle @@ -274,7 +274,7 @@ public SizeT() { /** * Basic limit information for a job object - * + *
* <p>
                  * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147%28v=vs.85%29.aspx */ public static class JOBOBJECT_BASIC_LIMIT_INFORMATION extends Structure implements Structure.ByReference { @@ -316,7 +316,7 @@ protected List getFieldOrder() { /** * Get job limit and state information - * + *
* <p>
                  * https://msdn.microsoft.com/en-us/library/windows/desktop/ms684925%28v=vs.85%29.aspx * * @param job job handle @@ -330,7 +330,7 @@ protected List getFieldOrder() { /** * Set job limit and state information - * + *
* <p>
                  * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686216%28v=vs.85%29.aspx * * @param job job handle diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java index 8df1d6c6df3da..4d062b7ff1f73 100644 --- a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java +++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java @@ -188,9 +188,9 @@ void init(final boolean daemonize, final Path pidFile, final boolean quiet, Envi /** * Required method that's called by Apache Commons procrun when * running as a service on Windows, when the service is stopped. - * + *
* <p>
                  * http://commons.apache.org/proper/commons-daemon/procrun.html - * + *
* <p>
                  * NOTE: If this method is renamed and/or moved, make sure to * update opensearch-service.bat! */ diff --git a/server/src/main/java/org/opensearch/client/AdminClient.java b/server/src/main/java/org/opensearch/client/AdminClient.java index 0c6c97b795983..1a5a39be4241a 100644 --- a/server/src/main/java/org/opensearch/client/AdminClient.java +++ b/server/src/main/java/org/opensearch/client/AdminClient.java @@ -32,13 +32,16 @@ package org.opensearch.client; +import org.opensearch.common.annotation.PublicApi; + /** * Administrative actions/operations against the cluster or the indices. * * @see org.opensearch.client.Client#admin() * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface AdminClient { /** diff --git a/server/src/main/java/org/opensearch/client/Client.java b/server/src/main/java/org/opensearch/client/Client.java index 551c64ad1c835..f4ae383249f61 100644 --- a/server/src/main/java/org/opensearch/client/Client.java +++ b/server/src/main/java/org/opensearch/client/Client.java @@ -83,6 +83,7 @@ import org.opensearch.action.update.UpdateResponse; import org.opensearch.common.Nullable; import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; @@ -102,8 +103,9 @@ * * @see org.opensearch.node.Node#client() * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Client extends OpenSearchClient, Releasable { Setting CLIENT_TYPE_SETTING_S = new Setting<>("client.type", "node", (s) -> { diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index 0b511fa95b9d0..05f09c1a6e661 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -157,6 +157,7 @@ import org.opensearch.action.search.PutSearchPipelineRequest; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.tasks.TaskId; @@ -167,8 +168,9 @@ * * @see AdminClient#cluster() * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ClusterAdminClient extends OpenSearchClient { /** diff --git a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java index 72b986ee25a31..20dab1caa36c4 100644 --- a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java +++ b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java @@ -129,6 +129,7 @@ import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; import org.opensearch.common.Nullable; import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.ActionListener; /** @@ -136,8 +137,9 @@ * * @see AdminClient#indices() * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IndicesAdminClient extends OpenSearchClient { /** diff --git a/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java 
b/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java index 482087be1c8eb..28e1e7c53cb9c 100644 --- a/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java +++ b/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java @@ -44,7 +44,7 @@ public interface AckedClusterStateTaskListener extends ClusterStateTaskListener /** * Called to determine which nodes the acknowledgement is expected from. - * + *
* <p>
                  * As this method will be called multiple times to determine the set of acking nodes, * it is crucial for it to return consistent results: Given the same listener instance * and the same node parameter, the method implementation should return the same result. diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java index 50beeb1f03deb..bf8494cc36857 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java @@ -70,7 +70,7 @@ default boolean runOnlyOnMaster() { /** * Callback invoked after new cluster state is published. Note that * this method is not invoked if the cluster state was not updated. - * + *
* <p>
                  * Note that this method will be executed using system context. * * @param clusterChangedEvent the change event for this cluster state change, containing @@ -80,7 +80,7 @@ default void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {} /** * Builds a concise description of a list of tasks (to be used in logging etc.). - * + *
* <p>
                  * Note that the tasks given are not necessarily the same as those that will be passed to {@link #execute(ClusterState, List)}, but are guaranteed to be a subset of them. This method can be called multiple times with different lists before execution. This allows grouped task descriptions by the submitting source. diff --git a/server/src/main/java/org/opensearch/cluster/DiffableUtils.java b/server/src/main/java/org/opensearch/cluster/DiffableUtils.java index dd2232968114e..a38fc81bebc08 100644 --- a/server/src/main/java/org/opensearch/cluster/DiffableUtils.java +++ b/server/src/main/java/org/opensearch/cluster/DiffableUtils.java @@ -182,7 +182,7 @@ public Map apply(Map map) { /** * Represents differences between two maps of objects and is used as base class for different map implementations. - * +
* <p>
                  * Implements serialization. How differences are applied is left to subclasses. * * @param the type of map keys @@ -381,9 +381,9 @@ public Integer readKey(StreamInput in) throws IOException { /** * Provides read and write operations to serialize map values. * Reading of values can be made dependent on map key. - * + *
* <p>
                  * Also provides operations to distinguish whether map values are diffable. - * + *
* <p>
                  * Should not be directly implemented, instead implement either * {@link DiffableValueSerializer} or {@link NonDiffableValueSerializer}. * @@ -517,7 +517,7 @@ public Diff readDiff(StreamInput in, K key) throws IOException { /** * Implementation of the ValueSerializer that wraps value and diff readers. - * + *
* <p>
                  * Note: this implementation is ignoring the key. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java index 35490d2f37a49..e381b8f244bf3 100644 --- a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java @@ -82,7 +82,7 @@ * to 30 seconds). The InternalClusterInfoService only runs on the cluster-manager node. * Listens for changes in the number of data nodes and immediately submits a * ClusterInfoUpdateJob if a node has been added. - * + *
* <p>
                  * Every time the timer runs, gathers information about the disk usage and * shard sizes across the cluster. * diff --git a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java index 042a4743ca25d..3e0c78099e6b4 100644 --- a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java @@ -189,7 +189,7 @@ public Snapshot snapshot() { /** * Returns the list of shards that are being restored and their status * - * @return list of shards + * @return map of shard id to shard restore status */ public Map shards() { return this.shards; } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java index 39d05e672f977..3a506397bcac8 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterStatePublisher.java @@ -46,11 +46,11 @@ public interface ClusterStatePublisher { /** * Publish all the changes to the cluster from the cluster-manager (can be called just by the cluster-manager). The publish * process should apply this state to the cluster-manager as well! - * +
* <p>
                  * The publishListener allows to wait for the publication to complete, which can be either successful completion, timing out or failing. * The method is guaranteed to pass back a {@link FailedToCommitClusterStateException} to the publishListener if the change is not * committed and should be rejected. Any other exception signals that something bad happened but the change is committed. - * + *
* <p>
                  * The {@link AckListener} allows to keep track of the ack received from nodes, and verify whether * they updated their own cluster state or not. */ diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index 08cd7d0ab02db..987a3e3ffa7d3 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -35,8 +35,11 @@ import org.apache.logging.log4j.Logger; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.io.IOUtils; import java.io.Closeable; import java.io.IOException; @@ -49,6 +52,7 @@ import java.util.Set; import static org.opensearch.cluster.coordination.Coordinator.ZEN1_BWC_TERM; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; /** * The core class of the cluster state coordination algorithm, directly implementing the @@ -64,8 +68,8 @@ public class CoordinationState { private final ElectionStrategy electionStrategy; - // persisted state - private final PersistedState persistedState; + // persisted state registry + private final PersistedStateRegistry persistedStateRegistry; // transient state private VoteCollection joinVotes; @@ -74,12 +78,18 @@ public class CoordinationState { private long lastPublishedVersion; private VotingConfiguration lastPublishedConfiguration; private VoteCollection publishVotes; - - public CoordinationState(DiscoveryNode localNode, PersistedState persistedState, ElectionStrategy electionStrategy) { + private final boolean isRemoteStateEnabled; + + public CoordinationState( + DiscoveryNode localNode, + PersistedStateRegistry persistedStateRegistry, + ElectionStrategy electionStrategy, + Settings settings + ) { this.localNode = localNode; - // persisted state - this.persistedState = persistedState; + // persisted state registry + this.persistedStateRegistry = persistedStateRegistry; this.electionStrategy = electionStrategy; // transient state @@ -87,16 +97,19 @@ public CoordinationState(DiscoveryNode localNode, PersistedState persistedState, this.startedJoinSinceLastReboot = false; this.electionWon = false; this.lastPublishedVersion = 0L; - this.lastPublishedConfiguration = persistedState.getLastAcceptedState().getLastAcceptedConfiguration(); + this.lastPublishedConfiguration = persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL) + .getLastAcceptedState() + .getLastAcceptedConfiguration(); this.publishVotes = new VoteCollection(); + this.isRemoteStateEnabled = isRemoteStoreClusterStateEnabled(settings); } public long getCurrentTerm() { - return persistedState.getCurrentTerm(); + return persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL).getCurrentTerm(); } public ClusterState getLastAcceptedState() { - return persistedState.getLastAcceptedState(); + return persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL).getLastAcceptedState(); } public long getLastAcceptedTerm() { @@ -186,7 +199,7 @@ public void setInitialState(ClusterState initialState) { assert 
initialState.getLastAcceptedConfiguration().isEmpty() == false; assert initialState.getLastCommittedConfiguration().isEmpty() == false; - persistedState.setLastAcceptedState(initialState); + persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL).setLastAcceptedState(initialState); } /** @@ -222,7 +235,7 @@ public Join handleStartJoin(StartJoinRequest startJoinRequest) { logger.debug("handleStartJoin: discarding {}: {}", joinVotes, reason); } - persistedState.setCurrentTerm(startJoinRequest.getTerm()); + persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL).setCurrentTerm(startJoinRequest.getTerm()); assert getCurrentTerm() == startJoinRequest.getTerm(); lastPublishedVersion = 0; lastPublishedConfiguration = getLastAcceptedConfiguration(); @@ -436,7 +449,7 @@ public PublishResponse handlePublishRequest(PublishRequest publishRequest) { clusterState.version(), clusterState.term() ); - persistedState.setLastAcceptedState(clusterState); + persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL).setLastAcceptedState(clusterState); assert getLastAcceptedState() == clusterState; return new PublishResponse(clusterState.term(), clusterState.version()); @@ -490,6 +503,7 @@ public Optional handlePublishResponse(DiscoveryNode sourceNo publishResponse.getVersion(), publishResponse.getTerm() ); + handlePreCommit(); return Optional.of(new ApplyCommitRequest(localNode, publishResponse.getTerm(), publishResponse.getVersion())); } @@ -547,10 +561,36 @@ public void handleCommit(ApplyCommitRequest applyCommit) { applyCommit.getVersion() ); - persistedState.markLastAcceptedStateAsCommitted(); + persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL).markLastAcceptedStateAsCommitted(); assert getLastCommittedConfiguration().equals(getLastAcceptedConfiguration()); } + /** + * This method should be called just before sending the PublishRequest to all cluster nodes. + * @param clusterState The cluster state for which pre publish activities should happen. + */ + public void handlePrePublish(ClusterState clusterState) { + // Publishing the current state to remote store before sending the cluster state to other nodes. + // This is to ensure the remote store is the single source of truth for current state. Even if the current node + // goes down after sending the cluster state to other nodes, we should be able to read the remote state and + // recover the cluster. + if (isRemoteStateEnabled) { + assert persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE) != null : "Remote state has not been initialized"; + persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).setLastAcceptedState(clusterState); + } + } + + /** + * This method should be called just before sending the ApplyCommitRequest to all cluster nodes. + */ + public void handlePreCommit() { + // Publishing the committed state to remote store before sending apply commit to other nodes. 
+ if (isRemoteStateEnabled) { + assert persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE) != null : "Remote state has not been initialized"; + persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).markLastAcceptedStateAsCommitted(); + } + } + public void invariant() { assert getLastAcceptedTerm() <= getCurrentTerm(); assert electionWon() == isElectionQuorum(joinVotes); @@ -564,7 +604,7 @@ public void invariant() { } public void close() throws IOException { - persistedState.close(); + IOUtils.close(persistedStateRegistry); } /** @@ -598,6 +638,12 @@ public interface PersistedState extends Closeable { */ void setLastAcceptedState(ClusterState clusterState); + /** + * Returns the stats for the persistence layer for {@link CoordinationState}. + * @return PersistedStateStats + */ + PersistedStateStats getStats(); + /** * Marks the last accepted cluster state as committed. * After a successful call to this method, {@link #getLastAcceptedState()} should return the last cluster state that was set, diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index aa9feab1baa6a..af63236a73ade 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -57,6 +57,7 @@ import org.opensearch.cluster.service.ClusterApplier; import org.opensearch.cluster.service.ClusterApplier.ClusterApplyListener; import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; @@ -86,6 +87,7 @@ import org.opensearch.discovery.SeedHostsResolver; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportService; @@ -182,6 +184,8 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private JoinHelper.JoinAccumulator joinAccumulator; private Optional currentPublication = Optional.empty(); private final NodeHealthService nodeHealthService; + private final PersistedStateRegistry persistedStateRegistry; + private final RemoteStoreNodeService remoteStoreNodeService; /** * @param nodeName The name of the node, used to name the {@link java.util.concurrent.ExecutorService} of the {@link SeedHostsResolver}. 
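The ordering encoded by the new handlePrePublish and handlePreCommit hooks above is the crux of this change: the remote persisted state accepts the new cluster state before the publish request fans out to followers, and is marked committed before the apply-commit round begins, so the remote store can always serve as the source of truth for recovery. A minimal sketch of that sequencing against the PersistedStateRegistry API added in this diff; the two send* methods are hypothetical stand-ins for the real transport fan-out:

import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.coordination.PersistedStateRegistry;
import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType;

class PublicationOrderingSketch {
    private final PersistedStateRegistry registry;
    private final boolean isRemoteStateEnabled;

    PublicationOrderingSketch(PersistedStateRegistry registry, boolean isRemoteStateEnabled) {
        this.registry = registry;
        this.isRemoteStateEnabled = isRemoteStateEnabled;
    }

    void publish(ClusterState newState) {
        if (isRemoteStateEnabled) {
            // handlePrePublish: persist remotely before any follower sees the state,
            // so a cluster-manager crash mid-publication is still recoverable
            registry.getPersistedState(PersistedStateType.REMOTE).setLastAcceptedState(newState);
        }
        sendPublishRequests(newState); // hypothetical transport fan-out
        if (isRemoteStateEnabled) {
            // handlePreCommit: mark the remote copy committed before apply-commit goes out
            registry.getPersistedState(PersistedStateType.REMOTE).markLastAcceptedStateAsCommitted();
        }
        sendApplyCommitRequests(newState); // hypothetical transport fan-out
    }

    private void sendPublishRequests(ClusterState state) { /* stub for illustration */ }

    private void sendApplyCommitRequests(ClusterState state) { /* stub for illustration */ }
}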
@@ -202,7 +206,9 @@ public Coordinator( Random random, RerouteService rerouteService, ElectionStrategy electionStrategy, - NodeHealthService nodeHealthService + NodeHealthService nodeHealthService, + PersistedStateRegistry persistedStateRegistry, + RemoteStoreNodeService remoteStoreNodeService ) { this.settings = settings; this.transportService = transportService; @@ -216,6 +222,7 @@ public Coordinator( allocationService, clusterManagerService, transportService, + remoteStoreNodeService, this::getCurrentTerm, this::getStateForClusterManagerService, this::handleJoinRequest, @@ -287,7 +294,9 @@ public Coordinator( joinHelper::logLastFailedJoinAttempt ); this.nodeHealthService = nodeHealthService; + this.persistedStateRegistry = persistedStateRegistry; this.localNodeCommissioned = true; + this.remoteStoreNodeService = remoteStoreNodeService; } private ClusterFormationState getClusterFormationState() { @@ -821,8 +830,7 @@ boolean publicationInProgress() { @Override protected void doStart() { synchronized (mutex) { - CoordinationState.PersistedState persistedState = persistedStateSupplier.get(); - coordinationState.set(new CoordinationState(getLocalNode(), persistedState, electionStrategy)); + coordinationState.set(new CoordinationState(getLocalNode(), persistedStateRegistry, electionStrategy, settings)); peerFinder.setCurrentTerm(getCurrentTerm()); configuredHostsResolver.start(); final ClusterState lastAcceptedState = coordinationState.get().getLastAcceptedState(); @@ -859,7 +867,16 @@ protected void doStart() { @Override public DiscoveryStats stats() { - return new DiscoveryStats(new PendingClusterStateStats(0, 0, 0), publicationHandler.stats()); + ClusterStateStats clusterStateStats = clusterManagerService.getClusterStateStats(); + ArrayList stats = new ArrayList<>(); + Stream.of(PersistedStateRegistry.PersistedStateType.values()).forEach(stateType -> { + if (persistedStateRegistry.getPersistedState(stateType) != null + && persistedStateRegistry.getPersistedState(stateType).getStats() != null) { + stats.add(persistedStateRegistry.getPersistedState(stateType).getStats()); + } + }); + clusterStateStats.setPersistenceStats(stats); + return new DiscoveryStats(new PendingClusterStateStats(0, 0, 0), publicationHandler.stats(), clusterStateStats); } @Override @@ -1309,6 +1326,7 @@ assert getLocalNode().equals(clusterState.getNodes().get(getLocalNode().getId()) leaderChecker.setCurrentNodes(publishNodes); followersChecker.setCurrentNodes(publishNodes); lagDetector.setTrackedNodes(publishNodes); + coordinationState.get().handlePrePublish(clusterState); publication.start(followersChecker.getFaultyNodes()); } } catch (Exception e) { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java b/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java index 67ef82ee7b2e9..b77ede5471534 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/InMemoryPersistedState.java @@ -65,6 +65,11 @@ public void setLastAcceptedState(ClusterState clusterState) { this.acceptedState = clusterState; } + @Override + public PersistedStateStats getStats() { + return null; + } + @Override public long getCurrentTerm() { return currentTerm; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index 399a986ffdec8..0976d15c2a96b 100644 --- 
a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -61,6 +61,7 @@ import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.BytesTransportRequest; @@ -135,6 +136,7 @@ public class JoinHelper { AllocationService allocationService, ClusterManagerService clusterManagerService, TransportService transportService, + RemoteStoreNodeService remoteStoreNodeService, LongSupplier currentTermSupplier, Supplier currentStateSupplier, BiConsumer joinHandler, @@ -152,7 +154,14 @@ public class JoinHelper { this.nodeCommissioned = nodeCommissioned; this.namedWriteableRegistry = namedWriteableRegistry; - this.joinTaskExecutorGenerator = () -> new JoinTaskExecutor(settings, allocationService, logger, rerouteService, transportService) { + this.joinTaskExecutorGenerator = () -> new JoinTaskExecutor( + settings, + allocationService, + logger, + rerouteService, + transportService, + remoteStoreNodeService + ) { private final long term = currentTermSupplier.getAsLong(); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 16de071270c99..3e43cbebd3f47 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -41,6 +41,7 @@ import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RerouteService; @@ -48,6 +49,8 @@ import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.persistent.PersistentTasksCustomMetadata; import org.opensearch.transport.TransportService; @@ -63,6 +66,9 @@ import static org.opensearch.cluster.decommission.DecommissionHelper.nodeCommissioned; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.STRICT; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; /** * Main executor for Nodes joining the OpenSearch cluster @@ -77,6 +83,8 @@ public class JoinTaskExecutor implements ClusterStateTaskExecutor execute(ClusterState currentState, List jo DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes()); + // TODO: We are using one of the existing node to build the repository metadata, this will need to be updated + // once we start supporting mixed compatibility mode. 
An optimization can be done as this will get invoked + // for every set of node join task which we can optimize to not compute if cluster state already has + // repository information. + RepositoriesMetadata repositoriesMetadata = remoteStoreNodeService.updateRepositoriesMetadata( + (currentNodes.getNodes().values()).stream().findFirst().get(), + currentState.getMetadata().custom(RepositoriesMetadata.TYPE) + ); + assert nodesBuilder.isLocalNodeElectedClusterManager(); Version minClusterNodeVersion = newState.nodes().getMinNodeVersion(); @@ -190,7 +209,7 @@ public ClusterTasksResult execute(ClusterState currentState, List jo if (enforceMajorVersion) { ensureMajorVersionBarrier(node.getVersion(), minClusterNodeVersion); } - ensureNodesCompatibility(node.getVersion(), minClusterNodeVersion, maxClusterNodeVersion); + ensureNodesCompatibility(node, currentNodes, currentState.metadata(), minClusterNodeVersion, maxClusterNodeVersion); // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices // we have to reject nodes that don't support all indices we have in this cluster ensureIndexCompatibility(node.getVersion(), currentState.getMetadata()); @@ -242,16 +261,36 @@ public ClusterTasksResult execute(ClusterState currentState, List jo .coordinationMetadata(coordMetadataBuilder.build()) .build(); return results.build( - allocationService.adaptAutoExpandReplicas(newState.nodes(nodesBuilder).metadata(newMetadata).build()) + allocationService.adaptAutoExpandReplicas( + newState.nodes(nodesBuilder) + .metadata(updateMetadataWithRepositoriesMetadata(newMetadata, repositoriesMetadata)) + .build() + ) ); } } - return results.build(allocationService.adaptAutoExpandReplicas(newState.nodes(nodesBuilder).build())); + return results.build( + allocationService.adaptAutoExpandReplicas( + newState.nodes(nodesBuilder) + .metadata(updateMetadataWithRepositoriesMetadata(currentState.metadata(), repositoriesMetadata)) + .build() + ) + ); } else { // we must return a new cluster state instance to force publishing. 
This is important // for the joining node to finalize its join and set us as a cluster-manager - return results.build(newState.build()); + return results.build( + newState.metadata(updateMetadataWithRepositoriesMetadata(currentState.metadata(), repositoriesMetadata)).build() + ); + } + } + + private Metadata updateMetadataWithRepositoriesMetadata(Metadata currentMetadata, RepositoriesMetadata repositoriesMetadata) { + if (repositoriesMetadata == null || repositoriesMetadata.repositories() == null || repositoriesMetadata.repositories().isEmpty()) { + return currentMetadata; + } else { + return Metadata.builder(currentMetadata).putCustom(RepositoriesMetadata.TYPE, repositoriesMetadata.get()).build(); } } @@ -425,16 +464,24 @@ public static void ensureIndexCompatibility(final Version nodeVersion, Metadata /** * ensures that the joining node has a version that's compatible with all current nodes */ - public static void ensureNodesCompatibility(final Version joiningNodeVersion, DiscoveryNodes currentNodes) { + public static void ensureNodesCompatibility(final DiscoveryNode joiningNode, DiscoveryNodes currentNodes, Metadata metadata) { final Version minNodeVersion = currentNodes.getMinNodeVersion(); final Version maxNodeVersion = currentNodes.getMaxNodeVersion(); - ensureNodesCompatibility(joiningNodeVersion, minNodeVersion, maxNodeVersion); + ensureNodesCompatibility(joiningNode, currentNodes, metadata, minNodeVersion, maxNodeVersion); } /** - * ensures that the joining node has a version that's compatible with a given version range + * ensures that the joining node has a version that's compatible with a given version range and ensures that the + * joining node has required attributes to join a remotestore cluster. */ - public static void ensureNodesCompatibility(Version joiningNodeVersion, Version minClusterNodeVersion, Version maxClusterNodeVersion) { + public static void ensureNodesCompatibility( + DiscoveryNode joiningNode, + DiscoveryNodes currentNodes, + Metadata metadata, + Version minClusterNodeVersion, + Version maxClusterNodeVersion + ) { + Version joiningNodeVersion = joiningNode.getVersion(); assert minClusterNodeVersion.onOrBefore(maxClusterNodeVersion) : minClusterNodeVersion + " > " + maxClusterNodeVersion; if (joiningNodeVersion.isCompatible(maxClusterNodeVersion) == false) { throw new IllegalStateException( @@ -456,6 +503,8 @@ public static void ensureNodesCompatibility(Version joiningNodeVersion, Version + "], which is incompatible." ); } + + ensureRemoteStoreNodesCompatibility(joiningNode, currentNodes, metadata); } /** @@ -488,12 +537,65 @@ public static void ensureNodeCommissioned(DiscoveryNode node, Metadata metadata) } } + /** + * The method ensures homogeneity - + * 1. The joining node has to be a remote store backed if it's joining a remote store backed cluster. Validates + * remote store attributes of joining node against the existing nodes of cluster. + * 2. The joining node has to be a non-remote store backed if it is joining a non-remote store backed cluster. + * Validates no remote store attributes are present in joining node as existing nodes in the cluster doesn't have + * remote store attributes. + *
<p>
+ * A remote store backed node is the one which holds all the remote store attributes and a remote store backed + * cluster is the one which has only homogeneous remote store backed nodes with same node attributes + *
<p>
+ * TODO: When we support moving from remote store cluster to non remote store and vice versa, this logic will + * need to be modified. */ + private static void ensureRemoteStoreNodesCompatibility(DiscoveryNode joiningNode, DiscoveryNodes currentNodes, Metadata metadata) { + List existingNodes = new ArrayList<>(currentNodes.getNodes().values()); + + assert existingNodes.isEmpty() == false; + + // TODO: The below check is valid till we don't support migration, once we start supporting migration a remote + // store node will be able to join a non remote store cluster and vice versa. #7986 + CompatibilityMode remoteStoreCompatibilityMode = REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get(metadata.settings()); + if (STRICT.equals(remoteStoreCompatibilityMode)) { + DiscoveryNode existingNode = existingNodes.get(0); + if (joiningNode.isRemoteStoreNode()) { + if (existingNode.isRemoteStoreNode()) { + RemoteStoreNodeAttribute joiningRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(joiningNode); + RemoteStoreNodeAttribute existingRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(existingNode); + if (existingRemoteStoreNodeAttribute.equals(joiningRemoteStoreNodeAttribute) == false) { + throw new IllegalStateException( + "a remote store node [" + + joiningNode + + "] is trying to join a remote store cluster with incompatible node attributes in " + + "comparison with existing node [" + + existingNode + + "]" + ); + } + } else { + throw new IllegalStateException( + "a remote store node [" + joiningNode + "] is trying to join a non remote store cluster" + ); + } + } else { + if (existingNode.isRemoteStoreNode()) { + throw new IllegalStateException( + "a non remote store node [" + joiningNode + "] is trying to join a remote store cluster" + ); + } + } + } + } + + public static Collection> addBuiltInJoinValidators( Collection> onJoinValidators ) { final Collection> validators = new ArrayList<>(); validators.add((node, state) -> { - ensureNodesCompatibility(node.getVersion(), state.getNodes()); + ensureNodesCompatibility(node, state.getNodes(), state.metadata()); ensureIndexCompatibility(node.getVersion(), state.getMetadata()); ensureNodeCommissioned(node, state.getMetadata()); }); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateRegistry.java b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateRegistry.java new file mode 100644 index 0000000000000..470ab02a682a8 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateRegistry.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.cluster.coordination; + +import org.opensearch.cluster.coordination.CoordinationState.PersistedState; +import org.opensearch.common.util.io.IOUtils; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * A class which encapsulates the PersistedStates + * + * @opensearch.internal + */ +public class PersistedStateRegistry implements Closeable { + + public PersistedStateRegistry() {} + + /** + * Distinct Types PersistedState which can be present on a node + */ + public enum PersistedStateType { + LOCAL, + REMOTE; + } + + private final Map persistedStates = new ConcurrentHashMap<>(); + + public void addPersistedState(PersistedStateType persistedStateType, PersistedState persistedState) { + PersistedState existingState = this.persistedStates.putIfAbsent(persistedStateType, persistedState); + assert existingState == null : "should only be set once, but already have " + existingState; + } + + public PersistedState getPersistedState(PersistedStateType persistedStateType) { + return this.persistedStates.get(persistedStateType); + } + + @Override + public void close() throws IOException { + IOUtils.close(persistedStates.values()); + } + +} diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java new file mode 100644 index 0000000000000..4d466c4b3ad73 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/coordination/PersistedStateStats.java @@ -0,0 +1,132 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.coordination; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Persisted cluster state related stats. 
+ * + * @opensearch.internal + */ +public class PersistedStateStats implements Writeable, ToXContentObject { + private final String statsName; + private AtomicLong totalTimeInMillis = new AtomicLong(0); + private AtomicLong failedCount = new AtomicLong(0); + private AtomicLong successCount = new AtomicLong(0); + private Map extendedFields = new HashMap<>(); // keeping minimal extensibility + + public PersistedStateStats(String statsName) { + this.statsName = statsName; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(statsName); + out.writeVLong(successCount.get()); + out.writeVLong(failedCount.get()); + out.writeVLong(totalTimeInMillis.get()); + if (extendedFields.size() > 0) { + out.writeBoolean(true); + out.writeVInt(extendedFields.size()); + for (Map.Entry extendedField : extendedFields.entrySet()) { + out.writeString(extendedField.getKey()); + out.writeVLong(extendedField.getValue().get()); + } + } else { + out.writeBoolean(false); + } + } + + public PersistedStateStats(StreamInput in) throws IOException { + this.statsName = in.readString(); + this.successCount = new AtomicLong(in.readVLong()); + this.failedCount = new AtomicLong(in.readVLong()); + this.totalTimeInMillis = new AtomicLong(in.readVLong()); + if (in.readBoolean()) { + int extendedFieldsSize = in.readVInt(); + this.extendedFields = new HashMap<>(); + for (int fieldNumber = 0; fieldNumber < extendedFieldsSize; fieldNumber++) { + extendedFields.put(in.readString(), new AtomicLong(in.readVLong())); + } + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(statsName); + builder.field(Fields.SUCCESS_COUNT, getSuccessCount()); + builder.field(Fields.FAILED_COUNT, getFailedCount()); + builder.field(Fields.TOTAL_TIME_IN_MILLIS, getTotalTimeInMillis()); + if (extendedFields.size() > 0) { + for (Map.Entry extendedField : extendedFields.entrySet()) { + builder.field(extendedField.getKey(), extendedField.getValue().get()); + } + } + builder.endObject(); + return builder; + } + + public void stateFailed() { + failedCount.incrementAndGet(); + } + + public void stateSucceeded() { + successCount.incrementAndGet(); + } + + /** + * Expects user to send time taken in milliseconds. 
+ * + * @param timeTakenInUpload time taken in uploading the cluster state to remote + */ + public void stateTook(long timeTakenInUpload) { + totalTimeInMillis.addAndGet(timeTakenInUpload); + } + + public long getTotalTimeInMillis() { + return totalTimeInMillis.get(); + } + + public long getFailedCount() { + return failedCount.get(); + } + + public long getSuccessCount() { + return successCount.get(); + } + + protected void addToExtendedFields(String extendedField, AtomicLong extendedFieldValue) { + this.extendedFields.put(extendedField, extendedFieldValue); + } + + public String getStatsName() { + return statsName; + } + + /** + * Fields for parsing and toXContent + * + * @opensearch.internal + */ + static final class Fields { + static final String SUCCESS_COUNT = "success_count"; + static final String TOTAL_TIME_IN_MILLIS = "total_time_in_millis"; + static final String FAILED_COUNT = "failed_count"; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java index 1570a84ab871f..128bd42fd7947 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java @@ -63,7 +63,7 @@ public class Reconfigurator { * and still the cluster would be unavailable. However not reducing the voting configuration size can also hamper resilience: in a * five-node cluster we could lose two nodes and by reducing the voting configuration to the remaining three nodes we could tolerate the * loss of a further node before failing. - * + *
<p>
                  * We offer two options: either we auto-shrink the voting configuration as long as it contains more than three nodes, or we don't and we * require the user to control the voting configuration manually using the retirement API. The former, default, option, guarantees that * as long as there have been at least three cluster-manager-eligible nodes in the cluster and no more than one of them is currently unavailable, diff --git a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java index 188ea1325e806..168ae5212888f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapClusterManagerCommand.java @@ -53,6 +53,8 @@ import java.util.Locale; import java.util.Objects; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; + /** * Tool to run an unsafe bootstrap * @@ -81,7 +83,11 @@ public class UnsafeBootstrapClusterManagerCommand extends OpenSearchNodeCommand static final Setting UNSAFE_BOOTSTRAP = ClusterService.USER_DEFINED_METADATA.getConcreteSetting( "cluster.metadata.unsafe-bootstrap" ); - + static final String REMOTE_CLUSTER_STATE_ENABLED_NODE = + "Unsafe bootstrap cannot be performed when remote cluster state is enabled. The cluster state in the remote store is considered the source of truth. " + + "In case, you still wish to do best effort recovery with unsafe-bootstrap, then please disable the " + + REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey() + + ". For more details, please check the OpenSearch documentation."; private OptionSpec applyClusterReadOnlyBlockOption; UnsafeBootstrapClusterManagerCommand() { @@ -101,6 +107,13 @@ protected boolean validateBeforeLock(Terminal terminal, Environment env) { if (clusterManager == false) { throw new OpenSearchException(NOT_CLUSTER_MANAGER_NODE_MSG); } + // During unsafe bootstrap, node will form a cluster with a new cluster UUID but with the existing metadata. + // This new state will not know about the previous cluster UUIDs and so we will not able to construct + // the cluster UUID chain to get the last known cluster UUID to restore from. + // Blocking unsafe-bootstrap below for this reason. + if (REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings) == true) { + throw new OpenSearchException(REMOTE_CLUSTER_STATE_ENABLED_NODE); + } return true; } diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java index d06e89d9ea170..cbc63565228f9 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java @@ -219,13 +219,13 @@ public void writeTo(final StreamOutput out) throws IOException { /** * Checks if an inactive primary shard should cause the cluster health to go RED. - * + *
<p>
                  * An inactive primary shard in an index should cause the cluster health to be RED to make it visible that some of the existing data is * unavailable. In case of index creation, snapshot restore or index shrinking, which are unexceptional events in the cluster lifecycle, * cluster health should not turn RED for the time where primaries are still in the initializing state but go to YELLOW instead. * However, in case of exceptional events, for example when the primary shard cannot be assigned to a node or initialization fails at * some point, cluster health should still turn RED. - * + *
<p>
                  * NB: this method should *not* be called on active shards nor on non-primary shards. */ public static ClusterHealthStatus getInactivePrimaryHealth(final ShardRouting shardRouting) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java new file mode 100644 index 0000000000000..27803cb106005 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/metadata/CryptoMetadata.java @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.OpenSearchParseException; +import org.opensearch.action.admin.cluster.crypto.CryptoSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * Metadata about encryption and decryption + * + * @opensearch.internal + */ +public class CryptoMetadata implements Writeable { + static final public String CRYPTO_METADATA_KEY = "crypto_metadata"; + static final public String KEY_PROVIDER_NAME_KEY = "key_provider_name"; + static final public String KEY_PROVIDER_TYPE_KEY = "key_provider_type"; + static final public String SETTINGS_KEY = "settings"; + private final String keyProviderName; + private final String keyProviderType; + private final Settings settings; + + /** + * Constructs new crypto metadata + * + * @param keyProviderName key provider name + * @param keyProviderType key provider type + * @param settings crypto settings + */ + public CryptoMetadata(String keyProviderName, String keyProviderType, Settings settings) { + this.keyProviderName = keyProviderName; + this.keyProviderType = keyProviderType; + this.settings = settings; + } + + /** + * Returns key provider name + * + * @return Key provider name + */ + public String keyProviderName() { + return this.keyProviderName; + } + + /** + * Returns key provider type + * + * @return key provider type + */ + public String keyProviderType() { + return this.keyProviderType; + } + + /** + * Returns crypto settings + * + * @return crypto settings + */ + public Settings settings() { + return this.settings; + } + + public CryptoMetadata(StreamInput in) throws IOException { + keyProviderName = in.readString(); + keyProviderType = in.readString(); + settings = Settings.readSettingsFromStream(in); + } + + public static CryptoMetadata fromRequest(CryptoSettings cryptoSettings) { + if (cryptoSettings == null) { + return null; + } + return new CryptoMetadata(cryptoSettings.getKeyProviderName(), cryptoSettings.getKeyProviderType(), cryptoSettings.getSettings()); + } + + /** + * Writes crypto metadata to stream output + * + * @param out stream output + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(keyProviderName); + out.writeString(keyProviderType); + Settings.writeSettingsToStream(settings, out); + } + + public static CryptoMetadata fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + String 
keyProviderType = null; + Settings settings = null; + String keyProviderName = parser.currentName(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if (KEY_PROVIDER_NAME_KEY.equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new OpenSearchParseException("failed to parse crypto metadata [{}], unknown type"); + } + keyProviderName = parser.text(); + } else if (KEY_PROVIDER_TYPE_KEY.equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new OpenSearchParseException("failed to parse crypto metadata [{}], unknown type"); + } + keyProviderType = parser.text(); + } else if (SETTINGS_KEY.equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new OpenSearchParseException("failed to parse crypto metadata [{}], unknown type"); + } + settings = Settings.fromXContent(parser); + } else { + throw new OpenSearchParseException("failed to parse crypto metadata, unknown field [{}]", currentFieldName); + } + } else { + throw new OpenSearchParseException("failed to parse repositories"); + } + } + return new CryptoMetadata(keyProviderName, keyProviderType, settings); + } + + public void toXContent(CryptoMetadata cryptoMetadata, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(CRYPTO_METADATA_KEY); + builder.field(KEY_PROVIDER_NAME_KEY, cryptoMetadata.keyProviderName()); + builder.field(KEY_PROVIDER_TYPE_KEY, cryptoMetadata.keyProviderType()); + builder.startObject(SETTINGS_KEY); + cryptoMetadata.settings().toXContent(builder, params); + builder.endObject(); + builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + CryptoMetadata that = (CryptoMetadata) o; + + if (!keyProviderName.equals(that.keyProviderName)) return false; + if (!keyProviderType.equals(that.keyProviderType)) return false; + return settings.equals(that.settings); + } + + @Override + public int hashCode() { + return Objects.hash(keyProviderName, keyProviderType, settings); + } + + @Override + public String toString() { + return "CryptoMetadata{" + keyProviderName + "}{" + keyProviderType + "}{" + settings + "}"; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java index 0da948dc78c5d..85a203e5e059a 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java @@ -61,7 +61,7 @@ /** * A collection of tombstones for explicitly marking indices as deleted in the cluster state. - * + *
<p>
                  * The cluster state contains a list of index tombstones for indices that have been * deleted in the cluster. Because cluster states are processed asynchronously by * nodes and a node could be removed from the cluster for a period of time, the @@ -250,7 +250,7 @@ public int getNumPurged() { /** * Purge tombstone entries. Returns the number of entries that were purged. - * + *
<p>
                  * Tombstones are purged if the number of tombstones in the list * is greater than the input parameter of maximum allowed tombstones. * Tombstones are purged until the list is equal to the maximum allowed. diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index b2c776df130b6..697b6a5583ea7 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -781,7 +781,7 @@ public long getAliasesVersion() { /** * The term of the current selected primary. This is a non-negative number incremented when * a primary shard is assigned after a full cluster restart or a replica shard is promoted to a primary. - * + *
<p>
                  * Note: since we increment the term every time a shard is assigned, the term for any operational shard (i.e., a shard * that can be indexed into) is larger than 0. See {@link IndexMetadataUpdater#applyChanges}. **/ @@ -1905,7 +1905,7 @@ public static Settings addHumanReadableSettings(Settings settings) { /** * Return the version the index was created from the provided index settings - * + *
<p>
                  * This looks for the presence of the {@link Version} object with key {@link IndexMetadata#SETTING_VERSION_CREATED} */ public static Version indexCreated(final Settings indexSettings) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java index aad91af8e85f8..86961010c77d6 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexNameExpressionResolver.java @@ -40,6 +40,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.regex.Regex; @@ -78,8 +79,9 @@ /** * Resolves index name from an expression * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexNameExpressionResolver { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(IndexNameExpressionResolver.class); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java index 3074719ffa179..c5efb55316b84 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.logging.DeprecationLogger; @@ -63,8 +64,9 @@ /** * Metadata for Index Templates * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class IndexTemplateMetadata extends AbstractDiffable { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(IndexTemplateMetadata.class); @@ -367,7 +369,7 @@ public IndexTemplateMetadata build() { /** * Serializes the template to xContent, using the legacy format where the mappings are * nested under the type name. - * + *
<p>
                  * This method is used for serializing templates before storing them in the cluster metadata, * and also in the REST layer when returning a deprecated typed response. */ @@ -384,7 +386,7 @@ public static void toXContentWithTypes( /** * Serializes the template to xContent, making sure not to nest mappings under the * type name. - * + *
<p>
                  * Note that this method should currently only be used for creating REST responses, * and not when directly updating stored templates. Index templates are still stored * in the old, typed format, and have yet to be migrated to be typeless. diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index adaef318896e9..baa276688725e 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -67,6 +67,7 @@ import org.opensearch.core.xcontent.XContentParser; import org.opensearch.gateway.MetadataStateFormat; import org.opensearch.index.IndexNotFoundException; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.MapperPlugin; import java.io.IOException; @@ -108,6 +109,22 @@ public class Metadata implements Iterable, Diffable, To public static final String UNKNOWN_CLUSTER_UUID = Strings.UNKNOWN_UUID_VALUE; public static final Pattern NUMBER_PATTERN = Pattern.compile("[0-9]+$"); + /** + * Utility to identify whether input index uses SEGMENT replication strategy in established cluster state metadata. + * Note: Method intended for use by other plugins as well. + * + * @param indexName Index name + * @return true if index uses SEGMENT replication, false otherwise + */ + public boolean isSegmentReplicationEnabled(String indexName) { + return Optional.ofNullable(index(indexName)) + .map( + indexMetadata -> ReplicationType.parseString(indexMetadata.getSettings().get(IndexMetadata.SETTING_REPLICATION_TYPE)) + .equals(ReplicationType.SEGMENT) + ) + .orElse(false); + } + /** * Context of the XContent. * @@ -906,19 +923,26 @@ public static boolean isGlobalStateEquals(Metadata metadata1, Metadata metadata2 if (!metadata1.coordinationMetadata.equals(metadata2.coordinationMetadata)) { return false; } - if (!metadata1.persistentSettings.equals(metadata2.persistentSettings)) { + if (!metadata1.hashesOfConsistentSettings.equals(metadata2.hashesOfConsistentSettings)) { return false; } - if (!metadata1.hashesOfConsistentSettings.equals(metadata2.hashesOfConsistentSettings)) { + if (!metadata1.clusterUUID.equals(metadata2.clusterUUID)) { return false; } - if (!metadata1.templates.equals(metadata2.templates())) { + if (metadata1.clusterUUIDCommitted != metadata2.clusterUUIDCommitted) { return false; } - if (!metadata1.clusterUUID.equals(metadata2.clusterUUID)) { + return isGlobalResourcesMetadataEquals(metadata1, metadata2); + } + + /** + * Compares Metadata entities persisted in Remote Store. + */ + public static boolean isGlobalResourcesMetadataEquals(Metadata metadata1, Metadata metadata2) { + if (!metadata1.persistentSettings.equals(metadata2.persistentSettings)) { return false; } - if (metadata1.clusterUUIDCommitted != metadata2.clusterUUIDCommitted) { + if (!metadata1.templates.equals(metadata2.templates())) { return false; } // Check if any persistent metadata needs to be saved @@ -1678,7 +1702,7 @@ private SortedMap buildIndicesLookup() { /** * Validates there isn't any index with a name that would clash with the future backing indices of the existing data streams. - * + *
<p>
                  * E.g., if data stream `foo` has backing indices [`.ds-foo-000001`, `.ds-foo-000002`] and the indices lookup contains indices * `.ds-foo-000001`, `.ds-foo-000002` and `.ds-foo-000006` this will throw an IllegalStateException (as attempting to rollover the * `foo` data stream from generation 5 to 6 will not be possible) diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index a8505c45fb27a..5cc31337374f8 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -69,9 +69,11 @@ import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.io.PathUtils; import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; @@ -87,11 +89,15 @@ import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.shard.IndexSettingProvider; +import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndexCreationException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidIndexNameException; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndices; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -130,10 +136,8 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.cluster.metadata.Metadata.DEFAULT_REPLICA_COUNT_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreAttributePresent; /** * Service responsible for submitting create index requests @@ -573,7 +577,8 @@ private ClusterState applyCreateIndexRequestWithV1Templates( settings, indexScopedSettings, shardLimitValidator, - indexSettingProviders + indexSettingProviders, + clusterService.getClusterSettings() ); int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, null); IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards); @@ -637,7 +642,8 @@ private ClusterState applyCreateIndexRequestWithV2Template( settings, indexScopedSettings, shardLimitValidator, - indexSettingProviders + indexSettingProviders, + clusterService.getClusterSettings() ); 
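The new `Metadata#isSegmentReplicationEnabled` above is explicitly intended for plugin use as well; a minimal call-site sketch, with the `clusterService` handle and the index name assumed:

```java
Metadata metadata = clusterService.state().metadata();
if (metadata.isSegmentReplicationEnabled("my-index")) { // "my-index" is a placeholder
    // SEGMENT replication: replicas copy segment files from the primary.
} else {
    // Document replication, or the index does not exist.
}
```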
int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, null); IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards); @@ -717,7 +723,8 @@ private ClusterState applyCreateIndexRequestWithExistingMetadata( settings, indexScopedSettings, shardLimitValidator, - indexSettingProviders + indexSettingProviders, + clusterService.getClusterSettings() ); final int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, sourceMetadata); IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards); @@ -748,7 +755,7 @@ private ClusterState applyCreateIndexRequestWithExistingMetadata( /** * Parses the provided mappings json and the inheritable mappings from the templates (if any) * into a map. - * + *
<p>
                  * The template mappings are applied in the order they are encountered in the list (clients * should make sure the lower index, closer to the head of the list, templates have the highest * {@link IndexTemplateMetadata#order()}). This merging makes no distinction between field @@ -786,7 +793,7 @@ static Map parseV1Mappings( * Validates and creates the settings for the new index based on the explicitly configured settings via the * {@link CreateIndexClusterStateUpdateRequest}, inherited from templates and, if recovering from another index (ie. split, shrink, * clone), the resize settings. - * + *
<p>
                  * The template mappings are applied in the order they are encountered in the list (clients should make sure the lower index, closer * to the head of the list, templates have the highest {@link IndexTemplateMetadata#order()}) * @@ -800,7 +807,8 @@ static Settings aggregateIndexSettings( Settings settings, IndexScopedSettings indexScopedSettings, ShardLimitValidator shardLimitValidator, - Set indexSettingProviders + Set indexSettingProviders, + ClusterSettings clusterSettings ) { // Create builders for the template and request settings. We transform these into builders // because we may want settings to be "removed" from these prior to being set on the new @@ -921,38 +929,51 @@ static Settings aggregateIndexSettings( } validateTranslogRetentionSettings(indexSettings); validateStoreTypeSettings(indexSettings); + validateRefreshIntervalSettings(request.settings(), clusterSettings); + validateTranslogDurabilitySettings(request.settings(), clusterSettings, settings); return indexSettings; } /** - * Updates index settings to set replication strategy by default based on cluster level settings + * Updates index settings to set replication strategy by default based on cluster level settings or remote store + * node attributes * @param settingsBuilder index settings builder to be updated with relevant settings * @param requestSettings settings passed in during index create request * @param clusterSettings cluster level settings */ private static void updateReplicationStrategy(Settings.Builder settingsBuilder, Settings requestSettings, Settings clusterSettings) { - if (CLUSTER_REPLICATION_TYPE_SETTING.exists(clusterSettings) && INDEX_REPLICATION_TYPE_SETTING.exists(requestSettings) == false) { - settingsBuilder.put(SETTING_REPLICATION_TYPE, CLUSTER_REPLICATION_TYPE_SETTING.get(clusterSettings)); - return; - } - if (INDEX_REPLICATION_TYPE_SETTING.exists(requestSettings) == true) { + if (INDEX_REPLICATION_TYPE_SETTING.exists(requestSettings)) { settingsBuilder.put(SETTING_REPLICATION_TYPE, INDEX_REPLICATION_TYPE_SETTING.get(requestSettings)); - return; + } else if (CLUSTER_REPLICATION_TYPE_SETTING.exists(clusterSettings)) { + settingsBuilder.put(SETTING_REPLICATION_TYPE, CLUSTER_REPLICATION_TYPE_SETTING.get(clusterSettings)); + } else if (isRemoteStoreAttributePresent(clusterSettings)) { + settingsBuilder.put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); + } else { + settingsBuilder.put(SETTING_REPLICATION_TYPE, CLUSTER_REPLICATION_TYPE_SETTING.getDefault(clusterSettings)); } - settingsBuilder.put(SETTING_REPLICATION_TYPE, CLUSTER_REPLICATION_TYPE_SETTING.getDefault(clusterSettings)); } /** - * Updates index settings to enable remote store by default based on cluster level settings + * Updates index settings to enable remote store by default based on node attributes * @param settingsBuilder index settings builder to be updated with relevant settings * @param clusterSettings cluster level settings */ private static void updateRemoteStoreSettings(Settings.Builder settingsBuilder, Settings clusterSettings) { - if (CLUSTER_REMOTE_STORE_ENABLED_SETTING.get(clusterSettings) == true) { + if (isRemoteStoreAttributePresent(clusterSettings)) { settingsBuilder.put(SETTING_REMOTE_STORE_ENABLED, true) - .put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.get(clusterSettings)) - .put(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.get(clusterSettings)); + .put( + SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, + 
clusterSettings.get( + Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY + ) + ) + .put( + SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, + clusterSettings.get( + Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY + ) + ); } } @@ -1010,7 +1031,7 @@ static int getIndexNumberOfRoutingShards(Settings indexSettings, @Nullable Index /** * Validate and resolve the aliases explicitly set for the index, together with the ones inherited from the specified * templates. - * + *
<p>
                  * The template mappings are applied in the order they are encountered in the list (clients should make sure the lower index, closer * to the head of the list, templates have the highest {@link IndexTemplateMetadata#order()}) * @@ -1253,6 +1274,7 @@ List getIndexSettingsValidationErrors( if (forbidPrivateIndexSettings) { validationErrors.addAll(validatePrivateSettingsNotExplicitlySet(settings, indexScopedSettings)); } + validateIndexReplicationTypeSettings(settings, clusterService.getClusterSettings()).ifPresent(validationErrors::add); if (indexName.isEmpty() || indexName.get().charAt(0) != '.') { // Apply aware replica balance validation only to non system indices int replicaCount = settings.getAsInt( @@ -1307,6 +1329,24 @@ private static List validateIndexCustomPath(Settings settings, @Nullable return validationErrors; } + /** + * Validates {@code index.replication.type} is not set if {@code cluster.restrict.index.replication_type} is set to true. + * + * @param requestSettings settings passed in during index create request + * @param clusterSettings cluster setting + */ + private static Optional validateIndexReplicationTypeSettings(Settings requestSettings, ClusterSettings clusterSettings) { + if (requestSettings.hasValue(SETTING_REPLICATION_TYPE) + && clusterSettings.get(IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING)) { + return Optional.of( + "index setting [index.replication.type] is not allowed to be set as [" + + IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey() + + "=true]" + ); + } + return Optional.empty(); + } + /** * Validates the settings and mappings for shrinking an index. * @@ -1494,4 +1534,50 @@ public static void validateTranslogRetentionSettings(Settings indexSettings) { } } } + + /** + * Validates {@code index.refresh_interval} is equal or below the {@code cluster.minimum.index.refresh_interval}. + * + * @param requestSettings settings passed in during index create/update request + * @param clusterSettings cluster setting + */ + static void validateRefreshIntervalSettings(Settings requestSettings, ClusterSettings clusterSettings) { + if (IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.exists(requestSettings) == false) { + return; + } + TimeValue requestRefreshInterval = IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.get(requestSettings); + TimeValue clusterMinimumRefreshInterval = clusterSettings.get(IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING); + if (requestRefreshInterval.millis() < clusterMinimumRefreshInterval.millis()) { + throw new IllegalArgumentException( + "invalid index.refresh_interval [" + + requestRefreshInterval + + "]: cannot be smaller than cluster.minimum.index.refresh_interval [" + + clusterMinimumRefreshInterval + + "]" + ); + } + } + + /** + * Validates {@code index.translog.durability} is not async if the {@code cluster.remote_store.index.restrict.async-durability} is set to true. 
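A sketch of what the refresh-interval validation above rejects, assuming a same-package (test) caller and a cluster where `cluster.minimum.index.refresh_interval` is `10s`; the values are illustrative:

```java
Settings requestSettings = Settings.builder()
    .put("index.refresh_interval", "5s") // below the assumed 10s cluster floor
    .build();
try {
    MetadataCreateIndexService.validateRefreshIntervalSettings(requestSettings, clusterSettings);
} catch (IllegalArgumentException e) {
    // "invalid index.refresh_interval [5s]: cannot be smaller than
    //  cluster.minimum.index.refresh_interval [10s]"
}
```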
+ * + * @param requestSettings settings passed in during index create/update request + * @param clusterSettings cluster setting + */ + static void validateTranslogDurabilitySettings(Settings requestSettings, ClusterSettings clusterSettings, Settings settings) { + if (isRemoteStoreAttributePresent(settings) == false + || IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.exists(requestSettings) == false + || clusterSettings.get(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING) == false) { + return; + } + Translog.Durability durability = IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.get(requestSettings); + if (durability.equals(Translog.Durability.ASYNC)) { + throw new IllegalArgumentException( + "index setting [index.translog.durability=async] is not allowed as cluster setting [" + + IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey() + + "=true]" + ); + } + + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java index dc98b60484b20..8f1fa4a23d8fc 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java @@ -167,7 +167,7 @@ public MetadataIndexStateService( /** * Closes one or more indices. - * + *
<p>
                  * Closing indices is a 3 steps process: it first adds a write block to every indices to close, then waits for the operations on shards * to be terminated and finally closes the indices by moving their state to CLOSE. */ @@ -303,7 +303,7 @@ public TimeValue timeout() { /** * Step 1 - Start closing indices by adding a write block - * + *
<p>
                  * This step builds the list of indices to close (the ones explicitly requested that are not in CLOSE state) and adds a unique cluster * block (or reuses an existing one) to every index to close in the cluster state. After the cluster state is published, the shards * should start to reject writing operations and we can proceed with step 2. diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 71b86ec853ce4..1093ac09777e7 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -747,7 +747,7 @@ public static Map> findConflictingV2Templates( /** * Return a map of v2 template names to their index patterns for v2 templates that would overlap * with the given template's index patterns. - * + *
<p>
                  * Based on the provided checkPriority and priority parameters this aims to report the overlapping * index templates regardless of the priority (ie. checkPriority == false) or otherwise overlapping * templates with the same priority as the given priority parameter (this is useful when trying to diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index 0221e8ec6636d..7d4c3512ed757 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -73,6 +73,9 @@ import java.util.Set; import static org.opensearch.action.support.ContextPreservingActionListener.wrapPreservingContext; +import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; +import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogDurabilitySettings; +import static org.opensearch.common.settings.AbstractScopedSettings.ARCHIVED_SETTINGS_PREFIX; import static org.opensearch.index.IndexSettings.same; /** @@ -125,6 +128,10 @@ public void updateSettings( .put(request.settings()) .normalizePrefix(IndexMetadata.INDEX_SETTING_PREFIX) .build(); + + validateRefreshIntervalSettings(normalizedSettings, clusterService.getClusterSettings()); + validateTranslogDurabilitySettings(normalizedSettings, clusterService.getClusterSettings(), clusterService.getSettings()); + Settings.Builder settingsForClosedIndices = Settings.builder(); Settings.Builder settingsForOpenIndices = Settings.builder(); final Set skippedSettings = new HashSet<>(); @@ -132,12 +139,16 @@ public void updateSettings( indexScopedSettings.validate( normalizedSettings.filter(s -> Regex.isSimpleMatchPattern(s) == false), // don't validate wildcards false, // don't validate dependencies here we check it below never allow to change the number of shards + false, + true, // Ignore archived setting. true ); // validate internal or private index settings for (String key : normalizedSettings.keySet()) { Setting setting = indexScopedSettings.get(key); boolean isWildcard = setting == null && Regex.isSimpleMatchPattern(key); + boolean isArchived = key.startsWith(ARCHIVED_SETTINGS_PREFIX); assert setting != null // we already validated the normalized settings + || isArchived || (isWildcard && normalizedSettings.hasValue(key) == false) : "unknown setting: " + key + " isWildcard: " @@ -145,7 +156,8 @@ public void updateSettings( + " hasValue: " + normalizedSettings.hasValue(key); settingsForClosedIndices.copy(key, normalizedSettings); - if (isWildcard || setting.isDynamic()) { + // Only allow dynamic settings and wildcards for open indices. Skip archived settings. 
+ if (isArchived == false && (isWildcard || setting.isDynamic())) { settingsForOpenIndices.copy(key, normalizedSettings); } else { skippedSettings.add(key); @@ -305,6 +317,8 @@ public ClusterState execute(ClusterState currentState) { Settings finalSettings = indexSettings.build(); indexScopedSettings.validate( finalSettings.filter(k -> indexScopedSettings.isPrivateSetting(k) == false), + true, + false, true ); metadataBuilder.put(IndexMetadata.builder(indexMetadata).settings(finalSettings)); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java index 90dba581df33b..e3689d046193c 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java @@ -54,6 +54,8 @@ import java.util.EnumSet; import java.util.List; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; + /** * Contains metadata about registered snapshot repositories * @@ -68,6 +70,7 @@ public class RepositoriesMetadata extends AbstractNamedDiffable implemen * in {@link org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse}. */ public static final String HIDE_GENERATIONS_PARAM = "hide_generations"; + public static final String HIDE_SYSTEM_REPOSITORY_SETTING = "hide_system_repository_setting"; private final List repositories; @@ -208,6 +211,7 @@ public static RepositoriesMetadata fromXContent(XContentParser parser) throws IO Settings settings = Settings.EMPTY; long generation = RepositoryData.UNKNOWN_REPO_GEN; long pendingGeneration = RepositoryData.EMPTY_REPO_GEN; + CryptoMetadata cryptoMetadata = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { String currentFieldName = parser.currentName(); @@ -231,6 +235,11 @@ public static RepositoriesMetadata fromXContent(XContentParser parser) throws IO throw new OpenSearchParseException("failed to parse repository [{}], unknown type", name); } pendingGeneration = parser.longValue(); + } else if (CryptoMetadata.CRYPTO_METADATA_KEY.equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new OpenSearchParseException("failed to parse repository [{}], unknown type", name); + } + cryptoMetadata = CryptoMetadata.fromXContent(parser); } else { throw new OpenSearchParseException( "failed to parse repository [{}], unknown field [{}]", @@ -245,7 +254,7 @@ public static RepositoriesMetadata fromXContent(XContentParser parser) throws IO if (type == null) { throw new OpenSearchParseException("failed to parse repository [{}], missing repository type", name); } - repository.add(new RepositoryMetadata(name, type, settings, generation, pendingGeneration)); + repository.add(new RepositoryMetadata(name, type, settings, generation, pendingGeneration, cryptoMetadata)); } else { throw new OpenSearchParseException("failed to parse repositories"); } @@ -279,8 +288,15 @@ public EnumSet context() { public static void toXContent(RepositoryMetadata repository, XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(repository.name()); builder.field("type", repository.type()); + if (repository.cryptoMetadata() != null) { + repository.cryptoMetadata().toXContent(repository.cryptoMetadata(), builder, params); + } + Settings settings = repository.settings(); + if 
(SYSTEM_REPOSITORY_SETTING.get(settings) && params.paramAsBoolean(HIDE_SYSTEM_REPOSITORY_SETTING, false)) { + settings = repository.settings().filter(s -> !s.equals(SYSTEM_REPOSITORY_SETTING.getKey())); + } builder.startObject("settings"); - repository.settings().toXContent(builder, params); + settings.toXContent(builder, params); builder.endObject(); if (params.paramAsBoolean(HIDE_GENERATIONS_PARAM, false) == false) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java index 0cbaec7700dfd..58637df4efe8f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoryMetadata.java @@ -54,6 +54,7 @@ public class RepositoryMetadata implements Writeable { private final String name; private final String type; private final Settings settings; + private final CryptoMetadata cryptoMetadata; /** * Safe repository generation. @@ -73,14 +74,29 @@ public class RepositoryMetadata implements Writeable { * @param settings repository settings */ public RepositoryMetadata(String name, String type, Settings settings) { - this(name, type, settings, RepositoryData.UNKNOWN_REPO_GEN, RepositoryData.EMPTY_REPO_GEN); + this(name, type, settings, RepositoryData.UNKNOWN_REPO_GEN, RepositoryData.EMPTY_REPO_GEN, null); + } + + public RepositoryMetadata(String name, String type, Settings settings, CryptoMetadata cryptoMetadata) { + this(name, type, settings, RepositoryData.UNKNOWN_REPO_GEN, RepositoryData.EMPTY_REPO_GEN, cryptoMetadata); } public RepositoryMetadata(RepositoryMetadata metadata, long generation, long pendingGeneration) { - this(metadata.name, metadata.type, metadata.settings, generation, pendingGeneration); + this(metadata.name, metadata.type, metadata.settings, generation, pendingGeneration, metadata.cryptoMetadata); } public RepositoryMetadata(String name, String type, Settings settings, long generation, long pendingGeneration) { + this(name, type, settings, generation, pendingGeneration, null); + } + + public RepositoryMetadata( + String name, + String type, + Settings settings, + long generation, + long pendingGeneration, + CryptoMetadata cryptoMetadata + ) { this.name = name; this.type = type; this.settings = settings; @@ -91,6 +107,7 @@ public RepositoryMetadata(String name, String type, Settings settings, long gene + "] must be greater or equal to generation [" + generation + "]"; + this.cryptoMetadata = cryptoMetadata; } /** @@ -120,6 +137,15 @@ public Settings settings() { return this.settings; } + /** + * Returns crypto metadata of repository + * + * @return crypto metadata of repository + */ + public CryptoMetadata cryptoMetadata() { + return this.cryptoMetadata; + } + /** * Returns the safe repository generation. {@link RepositoryData} for this generation is assumed to exist in the repository. * All operations on the repository must be based on the {@link RepositoryData} at this generation. 
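A sketch of how a caller can opt in to the new `hide_system_repository_setting` parameter when rendering repositories; the `repositoryMetadata` instance and an open `builder` are assumed:

```java
ToXContent.Params params = new ToXContent.MapParams(
    Collections.singletonMap(RepositoriesMetadata.HIDE_SYSTEM_REPOSITORY_SETTING, "true")
);
// For a repository flagged with system_repository=true, the marker setting is
// filtered out of the rendered "settings" object.
RepositoriesMetadata.toXContent(repositoryMetadata, builder, params);
```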
@@ -155,6 +181,11 @@ public RepositoryMetadata(StreamInput in) throws IOException { generation = RepositoryData.UNKNOWN_REPO_GEN; pendingGeneration = RepositoryData.EMPTY_REPO_GEN; } + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { + cryptoMetadata = in.readOptionalWriteable(CryptoMetadata::new); + } else { + cryptoMetadata = null; + } } /** @@ -171,6 +202,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(generation); out.writeLong(pendingGeneration); } + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { + out.writeOptionalWriteable(cryptoMetadata); + } } /** @@ -180,7 +214,10 @@ public void writeTo(StreamOutput out) throws IOException { * @return {@code true} if both instances equal in all fields but the generation fields */ public boolean equalsIgnoreGenerations(RepositoryMetadata other) { - return name.equals(other.name) && type.equals(other.type()) && settings.equals(other.settings()); + return name.equals(other.name) + && type.equals(other.type()) + && settings.equals(other.settings()) + && Objects.equals(cryptoMetadata, other.cryptoMetadata()); } @Override @@ -194,16 +231,21 @@ public boolean equals(Object o) { if (!type.equals(that.type)) return false; if (generation != that.generation) return false; if (pendingGeneration != that.pendingGeneration) return false; - return settings.equals(that.settings); + if (!settings.equals(that.settings)) return false; + return Objects.equals(cryptoMetadata, that.cryptoMetadata); } @Override public int hashCode() { - return Objects.hash(name, type, settings, generation, pendingGeneration); + return Objects.hash(name, type, settings, generation, pendingGeneration, cryptoMetadata); } @Override public String toString() { - return "RepositoryMetadata{" + name + "}{" + type + "}{" + settings + "}{" + generation + "}{" + pendingGeneration + "}"; + String toStr = "RepositoryMetadata{" + name + "}{" + type + "}{" + settings + "}{" + generation + "}{" + pendingGeneration + "}"; + if (cryptoMetadata != null) { + return toStr + "{" + cryptoMetadata + "}"; + } + return toStr; } } diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 9ba11152d1e10..fde6b6ee2fc79 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -35,6 +35,7 @@ import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; @@ -44,6 +45,7 @@ import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import java.io.IOException; import java.util.Collections; @@ -62,12 +64,14 @@ import java.util.stream.Stream; import static org.opensearch.node.NodeRoleSettings.NODE_ROLES_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX; /** * A discovery node represents a node that is part of the cluster. 
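The `cryptoMetadata` handling above follows the standard wire-compatibility recipe: gate the optional field on the version that introduced it, on both the read and the write path. Distilled from the hunk above, not new behavior:

```java
// Write side: peers older than 2.10 never receive the extra bytes.
if (out.getVersion().onOrAfter(Version.V_2_10_0)) {
    out.writeOptionalWriteable(cryptoMetadata);
}
// Read side: peers older than 2.10 never send them, so fall back to null.
cryptoMetadata = in.getVersion().onOrAfter(Version.V_2_10_0) ? in.readOptionalWriteable(CryptoMetadata::new) : null;
```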
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class DiscoveryNode implements Writeable, ToXContentFragment { static final String COORDINATING_ONLY = "coordinating_only"; @@ -281,6 +285,27 @@ public static DiscoveryNode createLocal(Settings settings, TransportAddress publ return new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), nodeId, publishAddress, attributes, roles, Version.CURRENT); } + /** Creates a DiscoveryNode representing the local node and verifies the repository. */ + public static DiscoveryNode createRemoteNodeLocal( + Settings settings, + TransportAddress publishAddress, + String nodeId, + RemoteStoreNodeService remoteStoreNodeService + ) { + Map attributes = Node.NODE_ATTRIBUTES.getAsMap(settings); + Set roles = getRolesFromSettings(settings); + DiscoveryNode discoveryNode = new DiscoveryNode( + Node.NODE_NAME_SETTING.get(settings), + nodeId, + publishAddress, + attributes, + roles, + Version.CURRENT + ); + remoteStoreNodeService.createAndVerifyRepositories(discoveryNode); + return discoveryNode; + } + /** extract node roles from the given settings */ public static Set getRolesFromSettings(final Settings settings) { if (NODE_ROLES_SETTING.exists(settings)) { @@ -508,6 +533,15 @@ public boolean isSearchNode() { return roles.contains(DiscoveryNodeRole.SEARCH_ROLE); } + /** + * Returns whether the node is a remote store node. + * + * @return true if the node contains remote store node attributes, false otherwise + */ + public boolean isRemoteStoreNode() { + return this.getAttributes().keySet().stream().anyMatch(key -> key.startsWith(REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX)); + } + /** * Returns a set of all the roles that the node has. The roles are returned in sorted order by the role name. *
<p>
                  @@ -569,7 +603,13 @@ public String toString() { sb.append('}'); } if (!attributes.isEmpty()) { - sb.append(attributes); + sb.append( + attributes.entrySet() + .stream() + .filter(entry -> !entry.getKey().startsWith(REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX)) // filter remote_store attributes + // from logging to reduce noise. + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) + ); } return sb.toString(); } diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java index 07d70b2c6c1b2..0d2b08656c38d 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java @@ -35,6 +35,7 @@ import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.Booleans; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; @@ -52,8 +53,9 @@ /** * Represents a node role. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class DiscoveryNodeRole implements Comparable { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DiscoveryNodeRole.class); diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java index 2461e82748ee8..2f73e38725737 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java @@ -369,7 +369,7 @@ public DiscoveryNode findByAddress(TransportAddress address) { /** * Returns the version of the node with the oldest version in the cluster that is not a client node - * + *
<p>
                  * If there are no non-client nodes, Version.CURRENT will be returned. * * @return the oldest version in the cluster @@ -380,7 +380,7 @@ public Version getSmallestNonClientNodeVersion() { /** * Returns the version of the node with the youngest version in the cluster that is not a client node. - * + *
<p>
                  * If there are no non-client nodes, Version.CURRENT will be returned. * * @return the youngest version in the cluster @@ -430,16 +430,16 @@ public DiscoveryNode resolveNode(String node) { /** * Resolves a set of nodes according to the given sequence of node specifications. Implements the logic in various APIs that allow the * user to run the action on a subset of the nodes in the cluster. See [Node specification] in the reference manual for full details. - * + *
<p>
                  * Works by tracking the current set of nodes and applying each node specification in sequence. The set starts out empty and each node * specification may either add or remove nodes. For instance: - * + *
<p>
                  * - _local, _cluster_manager (_master) and _all respectively add to the subset the local node, the currently-elected cluster_manager, and all the nodes * - node IDs, names, hostnames and IP addresses all add to the subset any nodes which match * - a wildcard-based pattern of the form "attr*:value*" adds to the subset all nodes with a matching attribute with a matching value * - role:true adds to the subset all nodes with a matching role * - role:false removes from the subset all nodes with a matching role. - * + *
<p>
                  * An empty sequence of node specifications returns all nodes, since the corresponding actions run on all nodes by default. */ public String[] resolveNodes(String... nodes) { @@ -826,7 +826,7 @@ public Builder localNodeId(String localNodeId) { * Checks that a node can be safely added to this node collection. * * @return null if all is OK or an error message explaining why a node can not be added. - * + *
<p>
                  * Note: if this method returns a non-null value, calling {@link #add(DiscoveryNode)} will fail with an * exception */ diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java index e4ffacd708632..d77d44580798a 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java @@ -453,7 +453,8 @@ public Builder initializeAsRestore(IndexMetadata indexMetadata, SnapshotRecovery public Builder initializeAsRemoteStoreRestore( IndexMetadata indexMetadata, RemoteStoreRecoverySource recoverySource, - Map activeInitializingShards + Map indexShardRoutingTableMap, + boolean forceRecoverAllPrimaries ) { final UnassignedInfo unassignedInfo = new UnassignedInfo( UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, @@ -465,11 +466,33 @@ public Builder initializeAsRemoteStoreRestore( } for (int shardNumber = 0; shardNumber < indexMetadata.getNumberOfShards(); shardNumber++) { ShardId shardId = new ShardId(index, shardNumber); + if (indexShardRoutingTableMap.containsKey(shardId) == false) { + throw new IllegalStateException("IndexShardRoutingTable is not present for shardId: " + shardId); + } IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); - if (activeInitializingShards.containsKey(shardId)) { - indexShardRoutingBuilder.addShard(activeInitializingShards.get(shardId)); - } else { + IndexShardRoutingTable indexShardRoutingTable = indexShardRoutingTableMap.get(shardId); + if (forceRecoverAllPrimaries || indexShardRoutingTable.primaryShard().unassigned()) { + // Primary shard to be recovered from remote store. indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, true, recoverySource, unassignedInfo)); + // All the replica shards to be recovered from peer recovery. + for (int replicaNumber = 0; replicaNumber < indexMetadata.getNumberOfReplicas(); replicaNumber++) { + indexShardRoutingBuilder.addShard( + ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, unassignedInfo) + ); + } + } else { + // Primary is either active or initializing. Do not trigger restore. + indexShardRoutingBuilder.addShard(indexShardRoutingTable.primaryShard()); + // Replica, if unassigned, trigger peer recovery else no action. + for (ShardRouting shardRouting : indexShardRoutingTable.replicaShards()) { + if (shardRouting.unassigned()) { + indexShardRoutingBuilder.addShard( + ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, unassignedInfo) + ); + } else { + indexShardRoutingBuilder.addShard(shardRouting); + } + } } shards.put(shardNumber, indexShardRoutingBuilder.build()); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java index 9cc09c6e4c31c..2dd57431d0375 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java @@ -463,7 +463,7 @@ private static Map rankNodes( * OpenSearch, however, we do not have that sort of broadcast-to-all behavior. In order to prevent a node that gets a high score and * then never gets any more requests, we must ensure it eventually returns to a more normal score and can be a candidate for serving * requests. - * + *
<p>
                  * This adjustment takes the "winning" node's statistics and adds the average of those statistics with each non-winning node. Let's say * the winning node had a queue size of 10 and a non-winning node had a queue of 18. The average queue size is (10 + 18) / 2 = 14 so the * non-winning node will have statistics added for a queue size of 14. This is repeated for the response time and service times as well. diff --git a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java index f9c137d0eaf3f..4035738addca6 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java @@ -50,7 +50,7 @@ /** * Represents the recovery source of a shard. Available recovery types are: - * + *
+ * <p>
                  * - {@link EmptyStoreRecoverySource} recovery from an empty store * - {@link ExistingStoreRecoverySource} recovery from an existing store * - {@link PeerRecoverySource} recovery from a primary on another node diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java index 1c8249815daa1..6bcc233c7fae6 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java @@ -70,7 +70,7 @@ * {@link RoutingNodes} represents a copy the routing information contained in the {@link ClusterState cluster state}. * It can be either initialized as mutable or immutable (see {@link #RoutingNodes(ClusterState, boolean)}), allowing * or disallowing changes to its elements. - * + *
+ * <p>
                  * The main methods used to update routing entries are: *
<ul>
                    *
                  • {@link #initializeShard} initializes an unassigned shard. @@ -82,6 +82,7 @@ * @opensearch.internal */ public class RoutingNodes implements Iterable { + private final Metadata metadata; private final Map nodesToShards = new HashMap<>(); @@ -107,6 +108,7 @@ public RoutingNodes(ClusterState clusterState) { } public RoutingNodes(ClusterState clusterState, boolean readOnly) { + this.metadata = clusterState.getMetadata(); this.readOnly = readOnly; final RoutingTable routingTable = clusterState.routingTable(); this.nodesPerAttributeNames = Collections.synchronizedMap(new HashMap<>()); @@ -367,10 +369,10 @@ public ShardRouting activePrimary(ShardId shardId) { /** * Returns one active replica shard for the given shard id or null if * no active replica is found. - * - * Since replicas could possibly be on nodes with a older version of OpenSearch than - * the primary is, this will return replicas on the highest version of OpenSearch. - * + *
+ * <p>
                    + * Since replicas could possibly be on nodes with an older version of OpenSearch than + * the primary is, this will return replicas on the highest version of OpenSearch when document + * replication is enabled. */ public ShardRouting activeReplicaWithHighestVersion(ShardId shardId) { // It's possible for replicaNodeVersion to be null, when disassociating dead nodes @@ -390,6 +392,30 @@ public ShardRouting activeReplicaWithHighestVersion(ShardId shardId) { .orElse(null); } + /** + * Returns one active replica shard for the given shard id or null if + * no active replica is found. + *
<p>
+ * Since replicas could possibly be on nodes with a higher version of OpenSearch than + * the primary is, this will return replicas on the oldest version of OpenSearch when segment + * replication is enabled to allow the replica to read segments from the primary. + * + */ + public ShardRouting activeReplicaWithOldestVersion(ShardId shardId) { + // It's possible for replicaNodeVersion to be null. Therefore, we need to protect against the version being null + // (meaning the node will be going away). + return assignedShards(shardId).stream() + .filter(shr -> !shr.primary() && shr.active()) + .filter(shr -> node(shr.currentNodeId()) != null) + .min( + Comparator.comparing( + shr -> node(shr.currentNodeId()).node(), + Comparator.nullsFirst(Comparator.comparing(DiscoveryNode::getVersion)) + ) + ) + .orElse(null); + } + + /** + * Returns true iff all replicas are active for the given shard routing. Otherwise false */ @@ -518,9 +544,9 @@ public Tuple relocateShard( /** * Applies the relevant logic to start an initializing shard. - *
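Editor's note: a self-contained sketch of how the two promotion selectors above diverge. The Replica record and its integer version are hypothetical simplifications of ShardRouting and DiscoveryNode#getVersion, used only to illustrate the comparator choice.

```java
import java.util.Comparator;
import java.util.List;
import java.util.Optional;

class PromotionChoiceSketch {
    // Hypothetical simplified stand-in for an active replica and its node's version.
    record Replica(String nodeId, int nodeVersion) {}

    static Optional<Replica> pickReplicaForPromotion(List<Replica> activeReplicas, boolean segmentReplicationEnabled) {
        Comparator<Replica> byNodeVersion = Comparator.comparingInt(Replica::nodeVersion);
        // Segment replication: replicas copy segment files written by the primary, and segment
        // formats are only backward compatible, so the safest new primary is on the oldest version.
        // Document replication: operations are re-indexed on the replica, so the newest version wins.
        return segmentReplicationEnabled
            ? activeReplicas.stream().min(byNodeVersion)
            : activeReplicas.stream().max(byNodeVersion);
    }
}
```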
+ * <p>
                    * Moves the initializing shard to started. If the shard is a relocation target, also removes the relocation source. - * + *
+ * <p>
                    * If the started shard is a primary relocation target, this also reinitializes currently initializing replicas as their * recovery source changes * @@ -579,9 +605,9 @@ public ShardRouting startShard(Logger logger, ShardRouting initializingShard, Ro /** * Applies the relevant logic to handle a cancelled or failed shard. - * + *
+ * <p>
                    * Moves the shard to unassigned or completely removes the shard (if relocation target). - * + *
+ * <p>
                    * - If shard is a primary, this also fails initializing replicas. * - If shard is an active primary, this also promotes an active replica to primary (if such a replica exists). * - If shard is a relocating primary, this also removes the primary relocation target shard. @@ -724,7 +750,12 @@ private void unassignPrimaryAndPromoteActiveReplicaIfExists( RoutingChangesObserver routingChangesObserver ) { assert failedShard.primary(); - ShardRouting activeReplica = activeReplicaWithHighestVersion(failedShard.shardId()); + ShardRouting activeReplica; + if (metadata.isSegmentReplicationEnabled(failedShard.getIndexName())) { + activeReplica = activeReplicaWithOldestVersion(failedShard.shardId()); + } else { + activeReplica = activeReplicaWithHighestVersion(failedShard.shardId()); + } if (activeReplica == null) { moveToUnassigned(failedShard, unassignedInfo); } else { diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java index 9ca4da0b5a85e..2b56163f852e8 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java @@ -575,10 +575,11 @@ public Builder addAsFromOpenToClose(IndexMetadata indexMetadata) { public Builder addAsRemoteStoreRestore( IndexMetadata indexMetadata, RemoteStoreRecoverySource recoverySource, - Map activeInitializingShards + Map indexShardRoutingTableMap, + boolean forceRecoveryPrimary ) { IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetadata.getIndex()) - .initializeAsRemoteStoreRestore(indexMetadata, recoverySource, activeInitializingShards); + .initializeAsRemoteStoreRestore(indexMetadata, recoverySource, indexShardRoutingTableMap, forceRecoveryPrimary); add(indexRoutingBuilder); return this; } diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java b/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java index cfdeed5c227b6..7f5109416494e 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java +++ b/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java @@ -14,7 +14,7 @@ /** * ShardMovementStrategy defines the order in which shard movement occurs. - * + *
+ * <p>
                    * ShardMovementStrategy values or rather their string representation to be used with * {@link BalancedShardsAllocator#SHARD_MOVEMENT_STRATEGY_SETTING} via cluster settings. * diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java index df3086e295c0f..8fe38d397bc1e 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java @@ -154,7 +154,7 @@ public enum Reason { /** * Captures the status of an unsuccessful allocation attempt for the shard, * causing it to remain in the unassigned state. - * + *
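Editor's note: an illustrative sketch of selecting a shard movement strategy through cluster settings, as the ShardMovementStrategy javadoc above describes. The literal setting key and enum value are assumptions inferred from SHARD_MOVEMENT_STRATEGY_SETTING, not taken from this diff.

```java
import org.opensearch.common.settings.Settings;

class ShardMovementStrategyExample {
    // Key and value are assumed; check BalancedShardsAllocator#SHARD_MOVEMENT_STRATEGY_SETTING for the release in use.
    static Settings movePrimariesFirst() {
        return Settings.builder()
            .put("cluster.routing.allocation.shard_movement_strategy", "PRIMARY_FIRST")
            .build();
    }
}
```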
+ * <p>
* Note, ordering of the enum is important, make sure to add new values * at the end and handle version serialization properly. * diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java index 19601483d5607..6fc0e535ef4dc 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java @@ -26,7 +26,7 @@ * This {@link AwarenessReplicaBalance} gives total unique values of awareness attributes * It takes effect only if cluster.routing.allocation.awareness.attributes and * cluster.routing.allocation.awareness.force.zone.values both are specified. - *
+ * <p>
                    * This is used in enforcing total copy of shard is a maximum of unique values of awareness attributes * Helps in balancing shards across all awareness attributes and ensuring high availability of data. */ diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java index f209e993518c1..ae2d4a0926194 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java @@ -36,14 +36,14 @@ public class ConstraintTypes { /** * Constraint to control number of shards of an index allocated on a single * node. - * + *
+ * <p>
                    * In current weight function implementation, when a node has significantly * fewer shards than other nodes (e.g. during single new node addition or node * replacement), its weight is much less than other nodes. All shard allocations * at this time tend to land on the new node with skewed weight. This breaks * index level balance in the cluster, by creating all shards of the same index * on one node, often resulting in a hotspot on that node. - * + *
+ * <p>
                    * This constraint is breached when balancer attempts to allocate more than * average shards per index per node. */ diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java index ae8d92dae6811..7fc78b05880f3 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java @@ -59,9 +59,9 @@ /** * Observer that tracks changes made to RoutingNodes in order to update the primary terms and in-sync allocation ids in * {@link IndexMetadata} once the allocation round has completed. - * + *
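Editor's note: stepping back to the ConstraintTypes rule above, the breach condition amounts to a small predicate. This is a hedged restatement with hypothetical names and plain integer inputs, not the project's own code.

```java
class IndexShardsPerNodeConstraintSketch {
    // Breached when placing one more shard of the index on this node would exceed
    // the node's fair (average) share of that index's shards.
    static boolean isBreached(int shardsOfIndexOnNode, int totalShardsOfIndex, int numberOfNodes) {
        double averagePerNode = (double) totalShardsOfIndex / numberOfNodes;
        return shardsOfIndexOnNode + 1 > Math.ceil(averagePerNode);
    }
}
```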
+ * <p>
                    * Primary terms are updated on primary initialization or when an active primary fails. - * + *
+ * <p>
                    * Allocation ids are added for shards that become active and removed for shards that stop being active. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 90eff50fd9b5d..41ace0e7661fe 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -336,7 +336,7 @@ public boolean getPreferPrimaryBalance() { *
• * weight(node, index) = weight<sub>index</sub>(node, index) + weight<sub>node</sub>(node, index) - *
+ * <p>
                  * package-private for testing */ static class WeightFunction { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index fd87fd3d93600..10e6733ae8c67 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -530,7 +530,7 @@ private void checkAndAddInEligibleTargetNode(RoutingNode targetNode) { /** * Move started shards that can not be allocated to a node anymore - * + *
+ * <p>
                  * For each shard to be moved this function executes a move operation * to the minimal eligible node with respect to the * weight function. If a shard is moved the shard will be set to diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java index 63d8c656f5049..29e9acca4e6c2 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java @@ -63,12 +63,12 @@ public interface ShardsAllocator { * Returns the decision for where a shard should reside in the cluster. If the shard is unassigned, * then the {@link AllocateUnassignedDecision} will be non-null. If the shard is not in the unassigned * state, then the {@link MoveDecision} will be non-null. - * + *
+ * <p>
                  * This method is primarily used by the cluster allocation explain API to provide detailed explanations * for the allocation of a single shard. Implementations of the {@link #allocate(RoutingAllocation)} method * may use the results of this method implementation to decide on allocating shards in the routing table * to the cluster. - * + *
+ * <p>
                  * If an implementation of this interface does not support explaining decisions for a single shard through * the cluster explain API, then this method should throw a {@code UnsupportedOperationException}. */ diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java index 7fffb0299af85..def0411853643 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java @@ -43,7 +43,7 @@ /** * A command to move shards in some way. - * + *
+ * <p>
                  * Commands are registered in {@link NetworkModule}. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java index 24c3fd7f34e4a..85f193c8c5580 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java @@ -109,7 +109,7 @@ public Decision canRebalance(RoutingAllocation allocation) { * Returns a {@link Decision} whether the given primary shard can be * forcibly allocated on the given node. This method should only be called * for unassigned primary shards where the node has a shard copy on disk. - * + *
+ * <p>
                  * Note: all implementations that override this behavior should take into account * the results of {@link #canAllocate(ShardRouting, RoutingNode, RoutingAllocation)} * before making a decision on force allocation, because force allocation should only diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 1bd47f111591d..2c7df6b81e676 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -73,23 +73,23 @@ /** * The {@link DiskThresholdDecider} checks that the node a shard is potentially * being allocated to has enough disk space. - * + *
+ * <p>
                  * It has three configurable settings, all of which can be changed dynamically: - * + *
+ * <p>
                  * cluster.routing.allocation.disk.watermark.low is the low disk * watermark. New shards will not allocated to a node with usage higher than this, * although this watermark may be passed by allocating a shard. It defaults to * 0.85 (85.0%). - * + *
+ * <p>
                  * cluster.routing.allocation.disk.watermark.high is the high disk * watermark. If a node has usage higher than this, shards are not allowed to * remain on the node. In addition, if allocating a shard to a node causes the * node to pass this watermark, it will not be allowed. It defaults to * 0.90 (90.0%). - * + *
+ * <p>
                  * Both watermark settings are expressed in terms of used disk percentage, or * exact byte values for free space (like "500mb") - * + *
+ * <p>
                  * cluster.routing.allocation.disk.threshold_enabled is used to * enable or disable this decider. It defaults to true (enabled). * @@ -119,7 +119,7 @@ public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings) /** * Returns the size of all shards that are currently being relocated to * the node, but may not be finished transferring yet. - * + *
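Editor's note: the three DiskThresholdDecider settings described above are dynamic and are often set together. A sketch using the documented keys and default values; illustrative only, and byte sizes like "500mb" are also accepted per the javadoc.

```java
import org.opensearch.common.settings.Settings;

class DiskWatermarkExample {
    static Settings watermarkDefaults() {
        return Settings.builder()
            .put("cluster.routing.allocation.disk.watermark.low", "0.85")   // stop placing new shards here
            .put("cluster.routing.allocation.disk.watermark.high", "0.90")  // start moving shards away
            .put("cluster.routing.allocation.disk.threshold_enabled", true) // decider on/off switch
            .build();
    }
}
```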
+ * <p>
                  * If subtractShardsMovingAway is true then the size of shards moving away is subtracted from the total size of all shards */ public static long sizeOfRelocatingShards( diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java index 1680f2d8cad1d..c2eccdbc6ed26 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java @@ -44,7 +44,7 @@ /** * An allocation decider that prevents multiple instances of the same shard to * be allocated on the same {@code node}. - * + *
+ * <p>
                  * The {@link #CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING} setting allows to perform a check to prevent * allocation of multiple instances of the same shard on a single {@code host}, * based on host name and host address. Defaults to `false`, meaning that no diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 3a9fdf0ea10cf..26a04de31ce39 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -323,7 +323,7 @@ private Decision allocateShardCopies( * - the initializing shard routing if we want to assign the initializing shard to this node instead * - the started shard routing in case if we want to check if we can relocate to this node. * - the relocating shard routing if we want to relocate to this node now instead. - * + *
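Editor's note: returning to SameShardAllocationDecider above, the optional same-host check is toggled through a cluster setting. The literal key below is an assumption inferred from CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING, not taken from this diff.

```java
import org.opensearch.common.settings.Settings;

class SameHostCheckExample {
    // Assumed key for CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING; the check defaults to false.
    static Settings enableSameHostCheck() {
        return Settings.builder()
            .put("cluster.routing.allocation.same_shard.host", true)
            .build();
    }
}
```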
+ * <p>
                  * This method returns the corresponding initializing shard that would be allocated to this node. */ private ShardRouting initializingShard(ShardRouting shardRouting, String currentNodeId) { diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index 007508162ba14..8e94e7cab23d3 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -294,7 +294,7 @@ public void addLocalNodeMasterListener(LocalNodeMasterListener listener) { /** * Adds a cluster state listener that is expected to be removed during a short period of time. * If provided, the listener will be notified once a specific time has elapsed. - * + *
+ * <p>
                  * NOTE: the listener is not removed on timeout. This is the responsibility of the caller. */ public void addTimeoutListener(@Nullable final TimeValue timeout, final TimeoutClusterStateListener listener) { diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java index 8da6b1b941f83..afc4e36ec352e 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java @@ -29,7 +29,7 @@ /** * This class does throttling on task submission to cluster manager node, it uses throttling key defined in various executors * as key for throttling. Throttling will be performed over task executor's class level, different task types have different executors class. - * + *
+ * <p>
* Set the task-specific setting to configure the throttling threshold for a particular task type. * e.g.: Set "cluster_manager.throttling.thresholds.put_mapping" to set the throttling limit of "put mapping" tasks, * Set it to the default value (-1) to disable throttling for this task type. @@ -117,9 +117,9 @@ public static TimeValue getMaxDelayForRetry() { * * Register task to cluster service with task key, * * override getClusterManagerThrottlingKey method with above task key in task executor. * * Verify that throttled tasks would be retried from data nodes - *
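Editor's note: a sketch of configuring the per-task throttling threshold using the key quoted in the javadoc above. The exact key shape may differ between releases, so treat this as illustrative rather than authoritative.

```java
import org.opensearch.common.settings.Settings;

class ThrottlingThresholdExample {
    // Key taken verbatim from the javadoc above; -1 restores the default (throttling disabled).
    static Settings putMappingThrottlingLimit(int threshold) {
        return Settings.builder()
            .put("cluster_manager.throttling.thresholds.put_mapping", threshold)
            .build();
    }
}
```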
+ * <p>
                  * Added retry mechanism in TransportClusterManagerNodeAction, so it would be retried for customer generated tasks. - * + *
+ * <p>
                  * If tasks are not getting retried then we can register with false flag, so user won't be able to configure threshold limits for it. */ protected ThrottlingKey registerClusterManagerTask(String taskKey, boolean throttlingEnabled) { @@ -236,7 +236,7 @@ public void onBeginSubmit(List tasks) { * It may start throwing throttling exception to older nodes in cluster. * Older version nodes will not be equipped to handle the throttling exception and * this may result in unexpected behavior where internal tasks would start failing without any retries. - * + *
+ * <p>
                  * For every task submission request, it will validate if nodes version is greater or equal to 2.5.0 and set the startThrottling flag. * Once the startThrottling flag is set, it will not perform check for next set of tasks. */ diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index e097803d86b48..aa7766979e851 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -45,6 +45,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.OperationRouting; import org.opensearch.cluster.routing.RerouteService; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; @@ -60,8 +61,9 @@ /** * Main Cluster Service * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ClusterService extends AbstractLifecycleComponent { private final ClusterManagerService clusterManagerService; diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterStateStats.java b/server/src/main/java/org/opensearch/cluster/service/ClusterStateStats.java new file mode 100644 index 0000000000000..96683ce720d0b --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterStateStats.java @@ -0,0 +1,120 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.service; + +import org.opensearch.cluster.coordination.PersistedStateStats; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Cluster state related stats. 
+ * + * @opensearch.internal + */ +public class ClusterStateStats implements Writeable, ToXContentObject { + + private AtomicLong updateSuccess = new AtomicLong(0); + private AtomicLong updateTotalTimeInMillis = new AtomicLong(0); + private AtomicLong updateFailed = new AtomicLong(0); + private List persistenceStats = new ArrayList<>(); + + public ClusterStateStats() {} + + public long getUpdateSuccess() { + return updateSuccess.get(); + } + + public long getUpdateTotalTimeInMillis() { + return updateTotalTimeInMillis.get(); + } + + public long getUpdateFailed() { + return updateFailed.get(); + } + + public List getPersistenceStats() { + return persistenceStats; + } + + public void stateUpdated() { + updateSuccess.incrementAndGet(); + } + + public void stateUpdateFailed() { + updateFailed.incrementAndGet(); + } + + public void stateUpdateTook(long stateUpdateTime) { + updateTotalTimeInMillis.addAndGet(stateUpdateTime); + } + + public ClusterStateStats setPersistenceStats(List persistenceStats) { + this.persistenceStats = persistenceStats; + return this; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(updateSuccess.get()); + out.writeVLong(updateTotalTimeInMillis.get()); + out.writeVLong(updateFailed.get()); + out.writeVInt(persistenceStats.size()); + for (PersistedStateStats stats : persistenceStats) { + stats.writeTo(out); + } + } + + public ClusterStateStats(StreamInput in) throws IOException { + this.updateSuccess = new AtomicLong(in.readVLong()); + this.updateTotalTimeInMillis = new AtomicLong(in.readVLong()); + this.updateFailed = new AtomicLong(in.readVLong()); + int persistedStatsSize = in.readVInt(); + this.persistenceStats = new ArrayList<>(); + for (int statsNumber = 0; statsNumber < persistedStatsSize; statsNumber++) { + PersistedStateStats stats = new PersistedStateStats(in); + this.persistenceStats.add(stats); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.CLUSTER_STATE_STATS); + builder.startObject(Fields.OVERALL); + builder.field(Fields.UPDATE_COUNT, getUpdateSuccess()); + builder.field(Fields.TOTAL_TIME_IN_MILLIS, getUpdateTotalTimeInMillis()); + builder.field(Fields.FAILED_COUNT, getUpdateFailed()); + builder.endObject(); + for (PersistedStateStats stats : persistenceStats) { + stats.toXContent(builder, params); + } + builder.endObject(); + return builder; + } + + /** + * Fields for parsing and toXContent + * + * @opensearch.internal + */ + static final class Fields { + static final String CLUSTER_STATE_STATS = "cluster_state_stats"; + static final String OVERALL = "overall"; + static final String UPDATE_COUNT = "update_count"; + static final String TOTAL_TIME_IN_MILLIS = "total_time_in_millis"; + static final String FAILED_COUNT = "failed_count"; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java index 563b69dfd0e2a..07c3f93ae6486 100644 --- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/MasterService.java @@ -112,7 +112,9 @@ public class MasterService extends AbstractLifecycleComponent { static final String CLUSTER_MANAGER_UPDATE_THREAD_NAME = "clusterManagerService#updateTask"; - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #CLUSTER_MANAGER_UPDATE_THREAD_NAME} */ + /** + * @deprecated As of 
2.2, because supporting inclusive language, replaced by {@link #CLUSTER_MANAGER_UPDATE_THREAD_NAME} + */ @Deprecated static final String MASTER_UPDATE_THREAD_NAME = "masterService#updateTask"; @@ -130,6 +132,7 @@ public class MasterService extends AbstractLifecycleComponent { private volatile Batcher taskBatcher; protected final ClusterManagerTaskThrottler clusterManagerTaskThrottler; private final ClusterManagerThrottlingStats throttlingStats; + private final ClusterStateStats stateStats; public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { this.nodeName = Objects.requireNonNull(Node.NODE_NAME_SETTING.get(settings)); @@ -147,6 +150,7 @@ public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadP this::getMinNodeVersion, throttlingStats ); + this.stateStats = new ClusterStateStats(); this.threadPool = threadPool; } @@ -339,7 +343,7 @@ private TimeValue getTimeSince(long startTimeNanos) { return TimeValue.timeValueMillis(TimeValue.nsecToMSec(threadPool.preciseRelativeTimeInNanos() - startTimeNanos)); } - protected void publish(ClusterChangedEvent clusterChangedEvent, TaskOutputs taskOutputs, long startTimeMillis) { + protected void publish(ClusterChangedEvent clusterChangedEvent, TaskOutputs taskOutputs, long startTimeNanos) { final PlainActionFuture fut = new PlainActionFuture() { @Override protected boolean blockingAllowed() { @@ -352,8 +356,12 @@ protected boolean blockingAllowed() { try { FutureUtils.get(fut); onPublicationSuccess(clusterChangedEvent, taskOutputs); + final long durationMillis = getTimeSince(startTimeNanos).millis(); + stateStats.stateUpdateTook(durationMillis); + stateStats.stateUpdated(); } catch (Exception e) { - onPublicationFailed(clusterChangedEvent, taskOutputs, startTimeMillis, e); + stateStats.stateUpdateFailed(); + onPublicationFailed(clusterChangedEvent, taskOutputs, startTimeNanos, e); } } @@ -464,7 +472,6 @@ public Builder incrementVersion(ClusterState clusterState) { * @param source the source of the cluster state update task * @param updateTask the full context for the cluster state update * task - * */ public & ClusterStateTaskListener> void submitStateUpdateTask( String source, @@ -490,7 +497,6 @@ public & Cluster * @param listener callback after the cluster state update task * completes * @param the type of the cluster state update task state - * */ public void submitStateUpdateTask( String source, @@ -947,7 +953,7 @@ void onNoLongerClusterManager() { /** * Functionality for register task key to cluster manager node. 
* - * @param taskKey - task key of task + * @param taskKey - task key of task * @param throttlingEnabled - throttling is enabled for task or not i.e does data node perform retries on it or not * @return throttling task key which needs to be passed while submitting task to cluster manager */ @@ -966,7 +972,6 @@ public ClusterManagerTaskThrottler.ThrottlingKey registerClusterManagerTask(Stri * that share the same executor will be executed * batches on this executor * @param the type of the cluster state update task state - * */ public void submitStateUpdateTasks( final String source, @@ -996,4 +1001,8 @@ public void submitStateUpdateTasks( } } + public ClusterStateStats getClusterStateStats() { + return stateStats; + } + } diff --git a/server/src/main/java/org/opensearch/common/Randomness.java b/server/src/main/java/org/opensearch/common/Randomness.java index 2c60e848b9db9..221bc95c41f31 100644 --- a/server/src/main/java/org/opensearch/common/Randomness.java +++ b/server/src/main/java/org/opensearch/common/Randomness.java @@ -127,7 +127,7 @@ public static Random get() { /** * Provides a secure source of randomness. - * + *
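Editor's note: circling back to the stats plumbing added to MasterService above, the new ClusterStateStats counters are driven as sketched below, using only the API visible in this diff; the literal values are made up for illustration.

```java
import org.opensearch.cluster.service.ClusterStateStats;

class ClusterStateStatsExample {
    static void record() {
        ClusterStateStats stats = new ClusterStateStats();
        stats.stateUpdated();          // a publication succeeded
        stats.stateUpdateTook(42);     // elapsed milliseconds for that update
        stats.stateUpdateFailed();     // a later publication failed
        assert stats.getUpdateSuccess() == 1 && stats.getUpdateFailed() == 1;
        assert stats.getUpdateTotalTimeInMillis() == 42;
    }
}
```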
+ * <p>
                  * This acts exactly similar to {@link #get()}, but returning a new {@link SecureRandom}. */ public static SecureRandom createSecure() { diff --git a/server/src/main/java/org/opensearch/common/Rounding.java b/server/src/main/java/org/opensearch/common/Rounding.java index cb5c2e87180d1..438822cb83725 100644 --- a/server/src/main/java/org/opensearch/common/Rounding.java +++ b/server/src/main/java/org/opensearch/common/Rounding.java @@ -38,6 +38,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.common.LocalTimeOffset.Gap; import org.opensearch.common.LocalTimeOffset.Overlap; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.time.DateUtils; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; @@ -413,6 +414,21 @@ public Rounding build() { } private abstract class PreparedRounding implements Prepared { + /** + * The maximum limit up to which array-based prepared rounding is used. + * 128 is a power of two that isn't huge. We might be able to do + * better if the limit was based on the actual type of prepared + * rounding but this'll do for now. + */ + private static final int DEFAULT_ARRAY_ROUNDING_MAX_THRESHOLD = 128; + + /** + * The maximum limit up to which linear search is used, otherwise binary search is used. + * This is because linear search is much faster on small arrays. + * Benchmark results: PR #9727 + */ + private static final int LINEAR_SEARCH_ARRAY_ROUNDING_MAX_THRESHOLD = 64; + /** * Attempt to build a {@link Prepared} implementation that relies on pre-calcuated * "round down" points. If there would be more than {@code max} points then return @@ -436,7 +452,9 @@ protected Prepared maybeUseArray(long minUtcMillis, long maxUtcMillis, int max) values = ArrayUtil.grow(values, i + 1); values[i++] = rounded; } - return new ArrayRounding(values, i, this); + return i <= LINEAR_SEARCH_ARRAY_ROUNDING_MAX_THRESHOLD + ? new BidirectionalLinearSearchArrayRounding(values, i, this) + : new BinarySearchArrayRounding(values, i, this); } } @@ -529,12 +547,11 @@ private LocalDateTime truncateLocalDateTime(LocalDateTime localDateTime) { @Override public Prepared prepare(long minUtcMillis, long maxUtcMillis) { - /* - * 128 is a power of two that isn't huge. We might be able to do - * better if the limit was based on the actual type of prepared - * rounding but this'll do for now. - */ - return prepareOffsetOrJavaTimeRounding(minUtcMillis, maxUtcMillis).maybeUseArray(minUtcMillis, maxUtcMillis, 128); + return prepareOffsetOrJavaTimeRounding(minUtcMillis, maxUtcMillis).maybeUseArray( + minUtcMillis, + maxUtcMillis, + PreparedRounding.DEFAULT_ARRAY_ROUNDING_MAX_THRESHOLD + ); } private TimeUnitPreparedRounding prepareOffsetOrJavaTimeRounding(long minUtcMillis, long maxUtcMillis) { @@ -1345,14 +1362,19 @@ public static Rounding read(StreamInput in) throws IOException { /** * Implementation of {@link Prepared} using pre-calculated "round down" points. * + *
* <p>
                  + * It uses binary search to find the greatest round-down point less than or equal to the given timestamp. + * * @opensearch.internal */ - private static class ArrayRounding implements Prepared { + @InternalApi + static class BinarySearchArrayRounding implements Prepared { private final long[] values; private final int max; private final Prepared delegate; - private ArrayRounding(long[] values, int max, Prepared delegate) { + BinarySearchArrayRounding(long[] values, int max, Prepared delegate) { + assert max > 0 : "at least one round-down point must be present"; this.values = values; this.max = max; this.delegate = delegate; @@ -1380,4 +1402,64 @@ public double roundingSize(long utcMillis, DateTimeUnit timeUnit) { return delegate.roundingSize(utcMillis, timeUnit); } } + + /** + * Implementation of {@link Prepared} using pre-calculated "round down" points. + * + *
<p>
                  + * It uses linear search to find the greatest round-down point less than or equal to the given timestamp. + * For small inputs (≤ 64 elements), this can be much faster than binary search as it avoids the penalty of + * branch mispredictions and pipeline stalls, and accesses memory sequentially. + * + *
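Editor's note: the paragraph that follows describes a "meet in the middle" scan; here is a standalone sketch of that idea over a single sorted array, simplified relative to the two-array layout used by BidirectionalLinearSearchArrayRounding below. It assumes values is sorted ascending, non-empty, and utcMillis >= values[0] (the real implementation asserts the equivalent).

```java
class BidirectionalScanSketch {
    // Returns the greatest round-down point <= utcMillis, scanning from both ends at once.
    static long roundDown(long[] values, long utcMillis) {
        int lo = 0;
        int hi = values.length - 1;
        while (lo <= hi) {
            if (values[hi] <= utcMillis) {
                return values[hi];     // late timestamps resolve quickly from the right end
            }
            if (values[lo] > utcMillis) {
                return values[lo - 1]; // overshot from the left; the previous point is the answer
            }
            lo++;
            hi--;
        }
        return values[lo - 1];         // pointers crossed: the answer sits between the two scans
    }
}
```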
<p>
                  + * It uses "meet in the middle" linear search to avoid the worst case scenario when the desired element is present + * at either side of the array. This is helpful for time-series data where velocity increases over time, so more + * documents are likely to find a greater timestamp which is likely to be present on the right end of the array. + * + * @opensearch.internal + */ + @InternalApi + static class BidirectionalLinearSearchArrayRounding implements Prepared { + private final long[] ascending; + private final long[] descending; + private final Prepared delegate; + + BidirectionalLinearSearchArrayRounding(long[] values, int max, Prepared delegate) { + assert max > 0 : "at least one round-down point must be present"; + this.delegate = delegate; + int len = (max + 1) >>> 1; // rounded-up to handle odd number of values + ascending = new long[len]; + descending = new long[len]; + + for (int i = 0; i < len; i++) { + ascending[i] = values[i]; + descending[i] = values[max - i - 1]; + } + } + + @Override + public long round(long utcMillis) { + int i = 0; + for (; i < ascending.length; i++) { + if (descending[i] <= utcMillis) { + return descending[i]; + } + if (ascending[i] > utcMillis) { + assert i > 0 : "utcMillis must be after " + ascending[0]; + return ascending[i - 1]; + } + } + return ascending[i - 1]; + } + + @Override + public long nextRoundingValue(long utcMillis) { + return delegate.nextRoundingValue(utcMillis); + } + + @Override + public double roundingSize(long utcMillis, DateTimeUnit timeUnit) { + return delegate.roundingSize(utcMillis, timeUnit); + } + } } diff --git a/server/src/main/java/org/opensearch/common/StreamContext.java b/server/src/main/java/org/opensearch/common/StreamContext.java index 32f095f8488b7..b163ba65dc7db 100644 --- a/server/src/main/java/org/opensearch/common/StreamContext.java +++ b/server/src/main/java/org/opensearch/common/StreamContext.java @@ -44,6 +44,16 @@ public StreamContext( this.numberOfParts = numberOfParts; } + /** + * Copy constructor for overriding class + */ + protected StreamContext(StreamContext streamContext) { + this.streamSupplier = streamContext.streamSupplier; + this.partSize = streamContext.partSize; + this.numberOfParts = streamContext.numberOfParts; + this.lastPartSize = streamContext.lastPartSize; + } + /** * Vendor plugins can use this method to create new streams only when they are required for processing * New streams won't be created till this method is called with the specific partNumber diff --git a/server/src/main/java/org/opensearch/common/StreamLimiter.java b/server/src/main/java/org/opensearch/common/StreamLimiter.java new file mode 100644 index 0000000000000..ec203a1c30868 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/StreamLimiter.java @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common; + +import org.apache.lucene.store.RateLimiter; + +import java.io.IOException; +import java.util.function.Supplier; + +/** + * The stream limiter that limits the transfer of bytes + * + * @opensearch.internal + */ +public class StreamLimiter { + + private final Supplier rateLimiterSupplier; + + private final StreamLimiter.Listener listener; + + private int bytesSinceLastRateLimit; + + public StreamLimiter(Supplier rateLimiterSupplier, Listener listener) { + this.rateLimiterSupplier = rateLimiterSupplier; + this.listener = listener; + } + + public void maybePause(int bytes) throws IOException { + bytesSinceLastRateLimit += bytes; + final RateLimiter rateLimiter = rateLimiterSupplier.get(); + if (rateLimiter != null) { + if (bytesSinceLastRateLimit >= rateLimiter.getMinPauseCheckBytes()) { + long pause = rateLimiter.pause(bytesSinceLastRateLimit); + bytesSinceLastRateLimit = 0; + if (pause > 0) { + listener.onPause(pause); + } + } + } + } + + /** + * Internal listener + * + * @opensearch.internal + */ + public interface Listener { + void onPause(long nanos); + } +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/VerifyingMultiStreamBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java similarity index 51% rename from server/src/main/java/org/opensearch/common/blobstore/VerifyingMultiStreamBlobContainer.java rename to server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java index d10445ba14d76..97f304d776f5c 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/VerifyingMultiStreamBlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java @@ -8,18 +8,20 @@ package org.opensearch.common.blobstore; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.blobstore.stream.read.ReadContext; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.core.action.ActionListener; import java.io.IOException; /** - * An extension of {@link BlobContainer} that adds {@link VerifyingMultiStreamBlobContainer#asyncBlobUpload} to allow + * An extension of {@link BlobContainer} that adds {@link AsyncMultiStreamBlobContainer#asyncBlobUpload} to allow * multipart uploads and performs integrity checks on transferred files * * @opensearch.internal */ -public interface VerifyingMultiStreamBlobContainer extends BlobContainer { +public interface AsyncMultiStreamBlobContainer extends BlobContainer { /** * Reads blob content from multiple streams, each from a specific part of the file, which is provided by the @@ -31,4 +33,19 @@ public interface VerifyingMultiStreamBlobContainer extends BlobContainer { * @throws IOException if any of the input streams could not be read, or the target blob could not be written to */ void asyncBlobUpload(WriteContext writeContext, ActionListener completionListener) throws IOException; + + /** + * Creates an async callback of a {@link ReadContext} containing the multipart streams for a specified blob within the container. + * @param blobName The name of the blob for which the {@link ReadContext} needs to be fetched. + * @param listener Async listener for {@link ReadContext} object which serves the input streams and other metadata for the blob + */ + @ExperimentalApi + void readBlobAsync(String blobName, ActionListener listener); + + /* + * Wether underlying blobContainer can verify integrity of data after transfer. 
If true and if expected + * checksum is provided in WriteContext, then the checksum of transferred data is compared with expected checksum + * by underlying blobContainer. In this case, caller doesn't need to ensure integrity of data. + */ + boolean remoteIntegrityCheckSupported(); } diff --git a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java new file mode 100644 index 0000000000000..82bc7a0baed50 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java @@ -0,0 +1,174 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore; + +import org.opensearch.common.StreamContext; +import org.opensearch.common.blobstore.stream.read.ReadContext; +import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.DecryptedRangedStreamProvider; +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.core.action.ActionListener; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; +import java.util.stream.Collectors; + +/** + * EncryptedBlobContainer is an encrypted BlobContainer that is backed by a + * {@link AsyncMultiStreamBlobContainer} + * + * @opensearch.internal + */ +public class AsyncMultiStreamEncryptedBlobContainer extends EncryptedBlobContainer implements AsyncMultiStreamBlobContainer { + + private final AsyncMultiStreamBlobContainer blobContainer; + private final CryptoHandler cryptoHandler; + + public AsyncMultiStreamEncryptedBlobContainer(AsyncMultiStreamBlobContainer blobContainer, CryptoHandler cryptoHandler) { + super(blobContainer, cryptoHandler); + this.blobContainer = blobContainer; + this.cryptoHandler = cryptoHandler; + } + + @Override + public void asyncBlobUpload(WriteContext writeContext, ActionListener completionListener) throws IOException { + EncryptedWriteContext encryptedWriteContext = new EncryptedWriteContext<>(writeContext, cryptoHandler); + blobContainer.asyncBlobUpload(encryptedWriteContext, completionListener); + } + + @Override + public void readBlobAsync(String blobName, ActionListener listener) { + try { + final U cryptoContext = cryptoHandler.loadEncryptionMetadata(getEncryptedHeaderContentSupplier(blobName)); + ActionListener decryptingCompletionListener = ActionListener.map( + listener, + readContext -> new DecryptedReadContext<>(readContext, cryptoHandler, cryptoContext) + ); + + blobContainer.readBlobAsync(blobName, decryptingCompletionListener); + } catch (Exception e) { + listener.onFailure(e); + } + } + + @Override + public boolean remoteIntegrityCheckSupported() { + return false; + } + + static class EncryptedWriteContext extends WriteContext { + + private final T encryptionMetadata; + private final CryptoHandler cryptoHandler; + private final long fileSize; + + /** + * Construct a new encrypted WriteContext object + */ + public EncryptedWriteContext(WriteContext writeContext, CryptoHandler cryptoHandler) { + super(writeContext); + this.cryptoHandler = cryptoHandler; + this.encryptionMetadata = this.cryptoHandler.initEncryptionMetadata(); + this.fileSize = 
this.cryptoHandler.estimateEncryptedLengthOfEntireContent(encryptionMetadata, writeContext.getFileSize()); + } + + public StreamContext getStreamProvider(long partSize) { + long adjustedPartSize = cryptoHandler.adjustContentSizeForPartialEncryption(encryptionMetadata, partSize); + StreamContext streamContext = super.getStreamProvider(adjustedPartSize); + return new EncryptedStreamContext<>(streamContext, cryptoHandler, encryptionMetadata); + } + + /** + * @return The total size of the encrypted file + */ + public long getFileSize() { + return fileSize; + } + } + + static class EncryptedStreamContext extends StreamContext { + + private final CryptoHandler cryptoHandler; + private final T encryptionMetadata; + + /** + * Construct a new encrypted StreamContext object + */ + public EncryptedStreamContext(StreamContext streamContext, CryptoHandler cryptoHandler, T encryptionMetadata) { + super(streamContext); + this.cryptoHandler = cryptoHandler; + this.encryptionMetadata = encryptionMetadata; + } + + @Override + public InputStreamContainer provideStream(int partNumber) throws IOException { + InputStreamContainer inputStreamContainer = super.provideStream(partNumber); + return cryptoHandler.createEncryptingStreamOfPart(encryptionMetadata, inputStreamContainer, getNumberOfParts(), partNumber); + } + + } + + /** + * DecryptedReadContext decrypts the encrypted {@link ReadContext} by acting as a transformation wrapper around + * the encrypted object + * @param Encryption Metadata / CryptoContext for the {@link CryptoHandler} instance + * @param Parsed Encryption Metadata / CryptoContext for the {@link CryptoHandler} instance + */ + static class DecryptedReadContext extends ReadContext { + + private final CryptoHandler cryptoHandler; + private final U cryptoContext; + private Long blobSize; + + public DecryptedReadContext(ReadContext readContext, CryptoHandler cryptoHandler, U cryptoContext) { + super(readContext); + this.cryptoHandler = cryptoHandler; + this.cryptoContext = cryptoContext; + } + + @Override + public long getBlobSize() { + // initializes the value lazily + if (blobSize == null) { + this.blobSize = this.cryptoHandler.estimateDecryptedLength(cryptoContext, super.getBlobSize()); + } + return this.blobSize; + } + + @Override + public List getPartStreams() { + return super.getPartStreams().stream() + .map(supplier -> (StreamPartCreator) () -> supplier.get().thenApply(this::decryptInputStreamContainer)) + .collect(Collectors.toUnmodifiableList()); + } + + /** + * Transforms an encrypted {@link InputStreamContainer} to a decrypted instance + * @param inputStreamContainer encrypted input stream container instance + * @return decrypted input stream container instance + */ + private InputStreamContainer decryptInputStreamContainer(InputStreamContainer inputStreamContainer) { + long startOfStream = inputStreamContainer.getOffset(); + long endOfStream = startOfStream + inputStreamContainer.getContentLength() - 1; + DecryptedRangedStreamProvider decryptedStreamProvider = cryptoHandler.createDecryptingStreamOfRange( + cryptoContext, + startOfStream, + endOfStream + ); + + long adjustedPos = decryptedStreamProvider.getAdjustedRange()[0]; + long adjustedLength = decryptedStreamProvider.getAdjustedRange()[1] - adjustedPos + 1; + final InputStream decryptedStream = decryptedStreamProvider.getDecryptedStreamProvider() + .apply(inputStreamContainer.getInputStream()); + return new InputStreamContainer(decryptedStream, adjustedLength, adjustedPos); + } + } +} diff --git 
a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java index cba6579190c26..2e25a532b5abf 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java @@ -93,10 +93,10 @@ public interface BlobContainer { /** * Provides a hint to clients for a suitable length to use with {@link BlobContainer#readBlob(String, long, long)}. - * + *
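Editor's note: the paragraphs that follow motivate sizing reads by this hint; a hedged usage sketch in which copyBlob and its parameters are hypothetical helpers, not part of the BlobContainer API.

```java
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.opensearch.common.blobstore.BlobContainer;

class PreferredLengthExample {
    // Reads the blob in chunks no larger than the container's preferred length.
    static void copyBlob(BlobContainer container, String name, long blobLength, OutputStream out) throws IOException {
        long pos = 0;
        while (pos < blobLength) {
            long len = Math.min(container.readBlobPreferredLength(), blobLength - pos);
            try (InputStream in = container.readBlob(name, pos, len)) {
                in.transferTo(out);
            }
            pos += len;
        }
    }
}
```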
+ * <p>
                  * Some blob containers have nontrivial costs attached to each readBlob call, so it is a good idea for consumers to speculatively * request more data than they need right now and to re-use this stream for future needs if possible. - * + *
+ * <p>
                  * Also, some blob containers return streams that are expensive to close before the stream has been fully consumed, and the cost may * depend on the length of the data that was left unconsumed. For these containers it's best to bound the cost of a partial read by * bounding the length of the data requested. @@ -131,7 +131,7 @@ default long readBlobPreferredLength() { /** * Reads blob content from the input stream and writes it to the container in a new blob with the given name, * using an atomic write operation if the implementation supports it. - * + *
+ * <p>
                  * This method assumes the container does not already contain a blob of the same blobName. If a blob by the * same name already exists, the operation will fail and an {@link IOException} will be thrown. * @@ -231,11 +231,19 @@ default void listBlobsByPrefixInSortedOrder( throw new IllegalArgumentException("limit should not be a negative value"); } try { - List blobNames = new ArrayList<>(listBlobsByPrefix(blobNamePrefix).values()); - blobNames.sort(blobNameSortOrder.comparator()); - listener.onResponse(blobNames.subList(0, Math.min(blobNames.size(), limit))); + listener.onResponse(listBlobsByPrefixInSortedOrder(blobNamePrefix, limit, blobNameSortOrder)); } catch (Exception e) { listener.onFailure(e); } } + + default List listBlobsByPrefixInSortedOrder(String blobNamePrefix, int limit, BlobNameSortOrder blobNameSortOrder) + throws IOException { + if (limit < 0) { + throw new IllegalArgumentException("limit should not be a negative value"); + } + List blobNames = new ArrayList<>(listBlobsByPrefix(blobNamePrefix).values()); + blobNames.sort(blobNameSortOrder.comparator()); + return blobNames.subList(0, Math.min(blobNames.size(), limit)); + } } diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java index ab40b1e2a082e..0f6646d37f950 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java @@ -31,6 +31,8 @@ package org.opensearch.common.blobstore; +import org.opensearch.cluster.metadata.RepositoryMetadata; + import java.io.Closeable; import java.util.Collections; import java.util.Map; @@ -47,10 +49,46 @@ public interface BlobStore extends Closeable { */ BlobContainer blobContainer(BlobPath path); + /** + * Returns statistics on the count of operations that have been performed on this blob store + */ /** * Returns statistics on the count of operations that have been performed on this blob store */ default Map stats() { return Collections.emptyMap(); } + + /** + * Returns details statistics of operations that have been performed on this blob store + */ + default Map> extendedStats() { + return Collections.emptyMap(); + } + + /** + * Reload the blob store inplace + */ + default void reload(RepositoryMetadata repositoryMetadata) {} + + /** + * Metrics for BlobStore interactions + */ + enum Metric { + REQUEST_SUCCESS("request_success_total"), + REQUEST_FAILURE("request_failures_total"), + REQUEST_LATENCY("request_time_in_millis"), + RETRY_COUNT("request_retry_count_total"); + + private String metricName; + + Metric(String name) { + this.metricName = name; + } + + public String metricName() { + return this.metricName; + } + } + } diff --git a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java new file mode 100644 index 0000000000000..d0933741339d9 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobContainer.java @@ -0,0 +1,194 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.blobstore; + +import org.opensearch.common.CheckedBiConsumer; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.DecryptedRangedStreamProvider; +import org.opensearch.common.crypto.EncryptedHeaderContentSupplier; +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.core.action.ActionListener; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * EncryptedBlobContainer is a wrapper around BlobContainer that encrypts the data on the fly. + */ +public class EncryptedBlobContainer implements BlobContainer { + + private final BlobContainer blobContainer; + private final CryptoHandler cryptoHandler; + + public EncryptedBlobContainer(BlobContainer blobContainer, CryptoHandler cryptoHandler) { + this.blobContainer = blobContainer; + this.cryptoHandler = cryptoHandler; + } + + @Override + public BlobPath path() { + return blobContainer.path(); + } + + @Override + public boolean blobExists(String blobName) throws IOException { + return blobContainer.blobExists(blobName); + } + + @Override + public InputStream readBlob(String blobName) throws IOException { + InputStream inputStream = blobContainer.readBlob(blobName); + return cryptoHandler.createDecryptingStream(inputStream); + } + + EncryptedHeaderContentSupplier getEncryptedHeaderContentSupplier(String blobName) { + return (start, end) -> { + byte[] buffer; + int length = (int) (end - start + 1); + try (InputStream inputStream = blobContainer.readBlob(blobName, start, length)) { + buffer = new byte[length]; + inputStream.readNBytes(buffer, (int) start, buffer.length); + } + return buffer; + }; + } + + @Override + public InputStream readBlob(String blobName, long position, long length) throws IOException { + U encryptionMetadata = cryptoHandler.loadEncryptionMetadata(getEncryptedHeaderContentSupplier(blobName)); + DecryptedRangedStreamProvider decryptedStreamProvider = cryptoHandler.createDecryptingStreamOfRange( + encryptionMetadata, + position, + position + length - 1 + ); + long adjustedPos = decryptedStreamProvider.getAdjustedRange()[0]; + long adjustedLength = decryptedStreamProvider.getAdjustedRange()[1] - adjustedPos + 1; + InputStream encryptedStream = blobContainer.readBlob(blobName, adjustedPos, adjustedLength); + return decryptedStreamProvider.getDecryptedStreamProvider().apply(encryptedStream); + } + + @Override + public long readBlobPreferredLength() { + return blobContainer.readBlobPreferredLength(); + } + + private void executeWrite(InputStream inputStream, long blobSize, CheckedBiConsumer writeConsumer) + throws IOException { + T cryptoContext = cryptoHandler.initEncryptionMetadata(); + InputStreamContainer streamContainer = new InputStreamContainer(inputStream, blobSize, 0); + InputStreamContainer encryptedStream = cryptoHandler.createEncryptingStream(cryptoContext, streamContainer); + long cryptoLength = cryptoHandler.estimateEncryptedLengthOfEntireContent(cryptoContext, blobSize); + writeConsumer.accept(encryptedStream.getInputStream(), cryptoLength); + } + + @Override + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { + executeWrite( + inputStream, + blobSize, + (encryptedStream, encryptedLength) -> blobContainer.writeBlob(blobName, encryptedStream, encryptedLength, failIfAlreadyExists) + ); + } + + @Override + public void writeBlobAtomic(String 
blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { + executeWrite( + inputStream, + blobSize, + (encryptedStream, encryptedLength) -> blobContainer.writeBlobAtomic( + blobName, + encryptedStream, + encryptedLength, + failIfAlreadyExists + ) + ); + } + + @Override + public DeleteResult delete() throws IOException { + return blobContainer.delete(); + } + + @Override + public void deleteBlobsIgnoringIfNotExists(List blobNames) throws IOException { + blobContainer.deleteBlobsIgnoringIfNotExists(blobNames); + } + + @Override + public Map listBlobs() throws IOException { + Map blobMetadataMap = blobContainer.listBlobs(); + return convertToEncryptedMetadataMap(blobMetadataMap); + } + + @Override + public Map children() throws IOException { + Map children = blobContainer.children(); + if (children != null) { + return children.entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> new EncryptedBlobContainer<>(entry.getValue(), cryptoHandler))); + } else { + return null; + } + } + + @Override + public Map listBlobsByPrefix(String blobNamePrefix) throws IOException { + Map blobMetadataMap = blobContainer.listBlobsByPrefix(blobNamePrefix); + return convertToEncryptedMetadataMap(blobMetadataMap); + } + + private Map convertToEncryptedMetadataMap(Map blobMetadataMap) { + if (blobMetadataMap == null) { + return null; + } + + return blobMetadataMap.entrySet() + .stream() + .collect( + Collectors.toMap( + Map.Entry::getKey, + entry -> new EncryptedBlobMetadata<>(entry.getValue(), cryptoHandler, getEncryptedHeaderContentSupplier(entry.getKey())) + ) + ); + + } + + @Override + public void listBlobsByPrefixInSortedOrder( + String blobNamePrefix, + int limit, + BlobNameSortOrder blobNameSortOrder, + ActionListener> listener + ) { + ActionListener> encryptedMetadataListener = ActionListener.delegateFailure( + listener, + (delegatedListener, metadataList) -> { + if (metadataList != null) { + List encryptedMetadata = metadataList.stream() + .map( + blobMetadata -> new EncryptedBlobMetadata<>( + blobMetadata, + cryptoHandler, + getEncryptedHeaderContentSupplier(blobMetadata.name()) + ) + ) + .collect(Collectors.toList()); + delegatedListener.onResponse(encryptedMetadata); + } else { + delegatedListener.onResponse(null); + } + } + ); + blobContainer.listBlobsByPrefixInSortedOrder(blobNamePrefix, limit, blobNameSortOrder, encryptedMetadataListener); + } +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobMetadata.java b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobMetadata.java new file mode 100644 index 0000000000000..8917bba806d08 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobMetadata.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
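EncryptedBlobContainer#readBlob(blobName, position, length) cannot fetch the requested decrypted range directly: it asks the CryptoHandler for an adjusted range aligned to the underlying cipher frames, reads that wider encrypted range, and only then decrypts. A rough sketch of the alignment arithmetic, assuming a hypothetical cipher with fixed 16-byte frames (the real adjustment comes from DecryptedRangedStreamProvider#getAdjustedRange):

// Illustration of the adjusted-range arithmetic in readBlob(blobName, position, length).
public class AdjustedRangeSketch {
    static long[] adjustRange(long position, long length, int frameSize) {
        long start = (position / frameSize) * frameSize;             // round down to frame start
        long endInclusive = position + length - 1;
        long end = ((endInclusive / frameSize) + 1) * frameSize - 1; // round up to frame end
        return new long[] { start, end };
    }

    public static void main(String[] args) {
        long position = 20, length = 20; // caller wants decrypted bytes [20, 39]
        long[] adjusted = adjustRange(position, length, 16);
        long adjustedPos = adjusted[0];
        long adjustedLength = adjusted[1] - adjustedPos + 1; // same formula as the container
        System.out.println(adjustedPos + " / " + adjustedLength); // 16 / 32
    }
}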
+ */ + +package org.opensearch.common.blobstore; + +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.EncryptedHeaderContentSupplier; + +import java.io.IOException; + +/** + * Adjusts length of encrypted blob to raw length + */ +public class EncryptedBlobMetadata implements BlobMetadata { + private final EncryptedHeaderContentSupplier encryptedHeaderContentSupplier; + private final BlobMetadata delegate; + private final CryptoHandler cryptoHandler; + + public EncryptedBlobMetadata( + BlobMetadata delegate, + CryptoHandler cryptoHandler, + EncryptedHeaderContentSupplier encryptedHeaderContentSupplier + ) { + this.encryptedHeaderContentSupplier = encryptedHeaderContentSupplier; + this.delegate = delegate; + this.cryptoHandler = cryptoHandler; + } + + @Override + public String name() { + return delegate.name(); + } + + @Override + public long length() { + U cryptoContext; + try { + cryptoContext = cryptoHandler.loadEncryptionMetadata(encryptedHeaderContentSupplier); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + return cryptoHandler.estimateDecryptedLength(cryptoContext, delegate.length()); + } +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java new file mode 100644 index 0000000000000..a18ca8b9d5c39 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/EncryptedBlobStore.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore; + +import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.crypto.CryptoHandlerRegistry; +import org.opensearch.crypto.CryptoRegistryException; + +import java.io.IOException; +import java.util.Map; + +/** + * The EncryptedBlobStore is a decorator class that wraps an existing BlobStore and provides encryption and decryption + * capabilities for the stored data. It uses a CryptoManager to handle encryption and decryption operations based on + * the provided CryptoMetadata. The EncryptedBlobStore ensures that all data written to and read from the underlying + * BlobStore is encrypted and decrypted transparently. + */ +public class EncryptedBlobStore implements BlobStore { + + private final BlobStore blobStore; + private final CryptoHandler cryptoHandler; + + /** + * Constructs an EncryptedBlobStore that wraps the provided BlobStore with encryption capabilities based on the + * given CryptoMetadata. + * + * @param blobStore The underlying BlobStore to be wrapped and used for storing encrypted data. + * @param cryptoMetadata The CryptoMetadata containing information about the key provider and settings for encryption. + * @throws CryptoRegistryException If the CryptoManager is not found during encrypted BlobStore creation. 
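EncryptedBlobMetadata#length() exists because listings must report the decrypted size while the underlying store only knows the encrypted size. A sketch of how that mapping could look under a hypothetical layout of one fixed header plus a per-frame authentication tag; the real math lives in CryptoHandler#estimateDecryptedLength:

// Hypothetical overhead model, not the real CryptoHandler arithmetic.
public class DecryptedLengthSketch {
    static long estimateDecryptedLength(long encryptedLength, int headerSize, int frameSize, int tagSize) {
        long payload = encryptedLength - headerSize;
        long frames = (payload + frameSize + tagSize - 1) / (frameSize + tagSize); // ceiling division
        return payload - frames * tagSize;
    }

    public static void main(String[] args) {
        // 1 MiB of plaintext in 64 KiB frames, each carrying a 16-byte tag, behind a 64-byte header
        long plaintext = 1024 * 1024;
        long frames = plaintext / (64 * 1024);
        long encrypted = 64 + plaintext + frames * 16;
        System.out.println(estimateDecryptedLength(encrypted, 64, 64 * 1024, 16)); // 1048576
    }
}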
+ */ + public EncryptedBlobStore(BlobStore blobStore, CryptoMetadata cryptoMetadata) { + CryptoHandlerRegistry cryptoHandlerRegistry = CryptoHandlerRegistry.getInstance(); + assert cryptoHandlerRegistry != null : "CryptoManagerRegistry is not initialized"; + this.cryptoHandler = cryptoHandlerRegistry.fetchCryptoHandler(cryptoMetadata); + if (cryptoHandler == null) { + throw new CryptoRegistryException( + cryptoMetadata.keyProviderName(), + cryptoMetadata.keyProviderType(), + "Crypto manager not found during encrypted blob store creation." + ); + } + this.blobStore = blobStore; + } + + /** + * Retrieves a BlobContainer from the underlying BlobStore based on the provided BlobPath. The returned BlobContainer + * is wrapped in an EncryptedBlobContainer to enable transparent encryption and decryption of data. + * + * @param path The BlobPath specifying the location of the BlobContainer. + * @return An EncryptedBlobContainer wrapping the BlobContainer retrieved from the underlying BlobStore. + */ + @Override + public BlobContainer blobContainer(BlobPath path) { + BlobContainer blobContainer = blobStore.blobContainer(path); + if (blobContainer instanceof AsyncMultiStreamBlobContainer) { + return new AsyncMultiStreamEncryptedBlobContainer<>((AsyncMultiStreamBlobContainer) blobContainer, cryptoHandler); + } + return new EncryptedBlobContainer<>(blobContainer, cryptoHandler); + } + + /** + * Retrieves statistics about the BlobStore. Delegates the call to the underlying BlobStore's stats() method. + * + * @return A map containing statistics about the BlobStore. + */ + @Override + public Map stats() { + return blobStore.stats(); + } + + /** + * Retrieves extended statistics about the BlobStore. Delegates the call to the underlying BlobStore's extendedStats() method. + * + * @return A map containing extended statistics about the BlobStore. + */ + @Override + public Map> extendedStats() { + return blobStore.extendedStats(); + } + + /** + * Closes the EncryptedBlobStore by decrementing the reference count of the CryptoManager and closing the + * underlying BlobStore. This ensures proper cleanup of resources. + * + * @throws IOException If an I/O error occurs while closing the BlobStore. + */ + @Override + public void close() throws IOException { + cryptoHandler.close(); + blobStore.close(); + } + +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java index 394855671688a..b6644ffd16bab 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/fs/FsBlobContainer.java @@ -69,7 +69,7 @@ /** * A file system based implementation of {@link org.opensearch.common.blobstore.BlobContainer}. * All blobs in the container are stored on a file system, the location of which is specified by the {@link BlobPath}. - * + *
<p>
                  * Note that the methods in this implementation of {@link org.opensearch.common.blobstore.BlobContainer} may * additionally throw a {@link java.lang.SecurityException} if the configured {@link java.lang.SecurityManager} * does not permit read and/or write access to the underlying files. @@ -258,7 +258,7 @@ public static String tempBlobName(final String blobName) { /** * Returns true if the blob is a leftover temporary blob. - * + *
<p>
* The temporary blobs might be left after failed atomic write operation. */ public static boolean isTempBlobName(final String blobName) { diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java new file mode 100644 index 0000000000000..4bdce11ff4f9a --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore.stream.read; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.io.InputStreamContainer; + +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; + +/** + * ReadContext is used to encapsulate all data needed by BlobContainer#readBlobAsync + */ +@ExperimentalApi +public class ReadContext { + private final long blobSize; + private final List<StreamPartCreator> asyncPartStreams; + private final String blobChecksum; + + public ReadContext(long blobSize, List<StreamPartCreator> asyncPartStreams, String blobChecksum) { + this.blobSize = blobSize; + this.asyncPartStreams = asyncPartStreams; + this.blobChecksum = blobChecksum; + } + + public ReadContext(ReadContext readContext) { + this.blobSize = readContext.blobSize; + this.asyncPartStreams = readContext.asyncPartStreams; + this.blobChecksum = readContext.blobChecksum; + } + + public String getBlobChecksum() { + return blobChecksum; + } + + public int getNumberOfParts() { + return asyncPartStreams.size(); + } + + public long getBlobSize() { + return blobSize; + } + + public List<StreamPartCreator> getPartStreams() { + return asyncPartStreams; + } + + /** + * Functional interface defining an instance that can create an async action + * to create a part of an object represented as an InputStreamContainer. + */ + @FunctionalInterface + public interface StreamPartCreator extends Supplier<CompletableFuture<InputStreamContainer>> { + /** + * Kicks off an async process to start streaming. + * + * @return When the returned future is completed, streaming has + * just begun. Clients must fully consume the resulting stream. + */ + @Override + CompletableFuture<InputStreamContainer> get(); + } +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriter.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriter.java new file mode 100644 index 0000000000000..1a403200249cd --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriter.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore.stream.read.listener; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.io.Channels; +import org.opensearch.common.io.InputStreamContainer; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.channels.FileChannel; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.function.UnaryOperator; + +/** + * FilePartWriter transfers the provided stream into the specified file path using a {@link FileChannel} + * instance.
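The StreamPartCreator contract above is just a lazy Supplier of a CompletableFuture: no bytes move until get() is invoked, so the caller decides how many part downloads run at once. A self-contained stand-in using plain JDK types (InputStream instead of InputStreamContainer):

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;

public class PartCreatorSketch {
    // Stand-in for ReadContext.StreamPartCreator: nothing runs until get() is called.
    interface PartCreator extends Supplier<CompletableFuture<InputStream>> {}

    static PartCreator partFor(byte[] data) {
        return () -> CompletableFuture.<InputStream>supplyAsync(() -> new ByteArrayInputStream(data));
    }

    public static void main(String[] args) {
        PartCreator part = partFor("hello".getBytes());
        // get() kicks off the async work; the returned stream must then be fully consumed
        part.get().thenAccept(in -> System.out.println("part stream ready")).join();
    }
}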
+ */ +@InternalApi +class FilePartWriter { + // 8 MB buffer for transfer + private static final int BUFFER_SIZE = 8 * 1024 * 1024; + + public static void write(Path fileLocation, InputStreamContainer stream, UnaryOperator<InputStream> rateLimiter) throws IOException { + try (FileChannel outputFileChannel = FileChannel.open(fileLocation, StandardOpenOption.WRITE, StandardOpenOption.CREATE)) { + try (InputStream inputStream = rateLimiter.apply(stream.getInputStream())) { + long streamOffset = stream.getOffset(); + final byte[] buffer = new byte[BUFFER_SIZE]; + int bytesRead; + while ((bytesRead = inputStream.read(buffer)) != -1) { + Channels.writeToChannel(buffer, 0, bytesRead, outputFileChannel, streamOffset); + streamOffset += bytesRead; + } + } + } + } +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java new file mode 100644 index 0000000000000..c77f2384ace0d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java @@ -0,0 +1,199 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore.stream.read.listener; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.IOUtils; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.blobstore.stream.read.ReadContext; +import org.opensearch.core.action.ActionListener; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.Collection; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.UnaryOperator; + +/** + * ReadContextListener orchestrates the async file fetch from the {@link org.opensearch.common.blobstore.BlobContainer} + * using a {@link ReadContext} callback. On response, it spawns off the download using multiple streams.
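FilePartWriter works because positioned FileChannel writes let every part land at its own offset in the same target file without sharing a file pointer. The same idea with plain NIO (Channels.writeToChannel is OpenSearch-internal, so FileChannel#write(ByteBuffer, long) stands in here):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class PositionedWriteSketch {
    static void writePart(Path file, byte[] part, long offset) throws IOException {
        try (FileChannel ch = FileChannel.open(file, StandardOpenOption.WRITE, StandardOpenOption.CREATE)) {
            ByteBuffer buf = ByteBuffer.wrap(part);
            long pos = offset;
            while (buf.hasRemaining()) {
                pos += ch.write(buf, pos); // write(ByteBuffer, position) does not move the channel's pointer
            }
        }
    }

    public static void main(String[] args) throws IOException {
        Path file = Files.createTempFile("download.", ".part");
        writePart(file, "world".getBytes(), 5); // parts may arrive out of order
        writePart(file, "hello".getBytes(), 0);
        System.out.println(new String(Files.readAllBytes(file))); // helloworld
    }
}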
+ */ +@InternalApi +public class ReadContextListener implements ActionListener<ReadContext> { + private static final Logger logger = LogManager.getLogger(ReadContextListener.class); + private static final String DOWNLOAD_PREFIX = "download."; + private final String blobName; + private final Path fileLocation; + private final String tmpFileName; + private final Path tmpFileLocation; + private final ActionListener<String> completionListener; + private final ThreadPool threadPool; + private final UnaryOperator<InputStream> rateLimiter; + private final int maxConcurrentStreams; + + public ReadContextListener( + String blobName, + Path fileLocation, + ActionListener<String> completionListener, + ThreadPool threadPool, + UnaryOperator<InputStream> rateLimiter, + int maxConcurrentStreams + ) { + this.blobName = blobName; + this.fileLocation = fileLocation; + this.completionListener = completionListener; + this.threadPool = threadPool; + this.rateLimiter = rateLimiter; + this.maxConcurrentStreams = maxConcurrentStreams; + this.tmpFileName = DOWNLOAD_PREFIX + UUIDs.randomBase64UUID() + "." + blobName; + this.tmpFileLocation = fileLocation.getParent().resolve(tmpFileName); + } + + @Override + public void onResponse(ReadContext readContext) { + logger.debug("Received {} parts for blob {}", readContext.getNumberOfParts(), blobName); + final int numParts = readContext.getNumberOfParts(); + final AtomicBoolean anyPartStreamFailed = new AtomicBoolean(false); + final GroupedActionListener<String> groupedListener = new GroupedActionListener<>(getFileCompletionListener(), numParts); + final Queue<ReadContext.StreamPartCreator> queue = new ConcurrentLinkedQueue<>(readContext.getPartStreams()); + final StreamPartProcessor processor = new StreamPartProcessor( + queue, + anyPartStreamFailed, + tmpFileLocation, + groupedListener, + threadPool.executor(ThreadPool.Names.REMOTE_RECOVERY), + rateLimiter + ); + for (int i = 0; i < Math.min(maxConcurrentStreams, queue.size()); i++) { + processor.process(queue.poll()); + } + } + + @SuppressForbidden(reason = "need to fsync once all parts received") + private ActionListener<Collection<String>> getFileCompletionListener() { + return ActionListener.wrap(response -> { + logger.trace("renaming temp file [{}] to [{}]", tmpFileLocation, fileLocation); + try { + IOUtils.fsync(tmpFileLocation, false); + Files.move(tmpFileLocation, fileLocation, StandardCopyOption.ATOMIC_MOVE); + // sync parent dir metadata + IOUtils.fsync(fileLocation.getParent(), true); + completionListener.onResponse(blobName); + } catch (IOException e) { + logger.error("Unable to rename temp file " + tmpFileLocation, e); + completionListener.onFailure(e); + } + }, e -> { + try { + Files.deleteIfExists(tmpFileLocation); + } catch (IOException ex) { + logger.warn("Unable to clean temp file {}", tmpFileLocation); + } + completionListener.onFailure(e); + }); + } + + /* + * For Tests + */ + Path getTmpFileLocation() { + return tmpFileLocation; + } + + @Override + public void onFailure(Exception e) { + completionListener.onFailure(e); + } + + private static class StreamPartProcessor { + private static final RuntimeException CANCELED_PART_EXCEPTION = new RuntimeException( + "Canceled part download due to previous failure" + ); + private final Queue<ReadContext.StreamPartCreator> queue; + private final AtomicBoolean anyPartStreamFailed; + private final Path fileLocation; + private final GroupedActionListener<String> completionListener; + private final Executor executor; + private final UnaryOperator<InputStream> rateLimiter; + + private StreamPartProcessor( + Queue<ReadContext.StreamPartCreator> queue, + AtomicBoolean anyPartStreamFailed, + Path fileLocation, + GroupedActionListener<String> completionListener,
Executor executor, + UnaryOperator<InputStream> rateLimiter + ) { + this.queue = queue; + this.anyPartStreamFailed = anyPartStreamFailed; + this.fileLocation = fileLocation; + this.completionListener = completionListener; + this.executor = executor; + this.rateLimiter = rateLimiter; + } + + private void process(ReadContext.StreamPartCreator supplier) { + if (supplier == null) { + return; + } + supplier.get().whenCompleteAsync((blobPartStreamContainer, throwable) -> { + if (throwable != null) { + processFailure(throwable instanceof Exception ? (Exception) throwable : new RuntimeException(throwable)); + } else if (anyPartStreamFailed.get()) { + processFailure(CANCELED_PART_EXCEPTION); + } else { + try { + FilePartWriter.write(fileLocation, blobPartStreamContainer, rateLimiter); + completionListener.onResponse(fileLocation.toString()); + + // Upon successfully completing a file part, pull another + // file part off the queue to trigger asynchronous processing + process(queue.poll()); + } catch (Exception e) { + processFailure(e); + } + } + }, executor); + } + + private void processFailure(Exception e) { + if (anyPartStreamFailed.getAndSet(true) == false) { + completionListener.onFailure(e); + + // Drain the queue of pending part downloads. These can be discarded + // since they haven't started any work yet, but the listener must be + // notified for each part. + Object item = queue.poll(); + while (item != null) { + completionListener.onFailure(CANCELED_PART_EXCEPTION); + item = queue.poll(); + } + } else { + completionListener.onFailure(e); + } + try { + Files.deleteIfExists(fileLocation); + } catch (IOException ex) { + // Swallow the exception; just log the failed cleanup + logger.info("Failed to delete file {} on stream failure: {}", fileLocation, ex); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/package-info.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/package-info.java new file mode 100644 index 0000000000000..fe670fe3eb25c --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/package-info.java @@ -0,0 +1,14 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Abstractions for stream based file reads from the blob store. + * Provides listeners for performing the necessary async read operations to perform + * multi stream reads for blobs from the container. + * */ +package org.opensearch.common.blobstore.stream.read.listener; diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/package-info.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/package-info.java new file mode 100644 index 0000000000000..a9e2ca35c1fa6 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/package-info.java @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Abstractions for stream based file reads from the blob store. + * Provides support for async reads from the blob container.
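The part-download orchestration above boils down to: seed at most maxConcurrentStreams tasks, then let each completion pull the next part off a shared queue. A minimal sketch of that bounded-concurrency pattern, with integers standing in for part downloads:

import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;

public class BoundedQueueSketch {
    public static void main(String[] args) throws InterruptedException {
        Queue<Integer> parts = new ConcurrentLinkedQueue<>();
        for (int i = 0; i < 8; i++) parts.add(i);
        CountDownLatch done = new CountDownLatch(8);
        int maxConcurrentStreams = 2;
        // Seed the initial workers; everything else is demand-driven.
        for (int i = 0; i < maxConcurrentStreams; i++) {
            process(parts.poll(), parts, done);
        }
        done.await();
    }

    static void process(Integer part, Queue<Integer> parts, CountDownLatch done) {
        if (part == null) return;
        CompletableFuture.runAsync(() -> System.out.println("downloaded part " + part))
            .whenComplete((v, t) -> {
                done.countDown();
                process(parts.poll(), parts, done); // successor pulled only after this part finishes
            });
    }
}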
+ * */ +package org.opensearch.common.blobstore.stream.read; diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/write/WriteContext.java b/server/src/main/java/org/opensearch/common/blobstore/stream/write/WriteContext.java index ef5e3d1e8c26c..e74462f82400d 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/stream/write/WriteContext.java +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/write/WriteContext.java @@ -61,6 +61,20 @@ public WriteContext( this.expectedChecksum = expectedChecksum; } + /** + * Copy constructor used by overriding class + */ + protected WriteContext(WriteContext writeContext) { + this.fileName = writeContext.fileName; + this.streamContextSupplier = writeContext.streamContextSupplier; + this.fileSize = writeContext.fileSize; + this.failIfAlreadyExists = writeContext.failIfAlreadyExists; + this.writePriority = writeContext.writePriority; + this.uploadFinalizer = writeContext.uploadFinalizer; + this.doRemoteDataIntegrityCheck = writeContext.doRemoteDataIntegrityCheck; + this.expectedChecksum = writeContext.expectedChecksum; + } + /** * @return The file name */ diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/write/WritePriority.java b/server/src/main/java/org/opensearch/common/blobstore/stream/write/WritePriority.java index b8c0b52f93a3c..3f341c878c3c7 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/stream/write/WritePriority.java +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/write/WritePriority.java @@ -15,5 +15,6 @@ */ public enum WritePriority { NORMAL, - HIGH + HIGH, + URGENT } diff --git a/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java b/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java index c7cfef5c5ce3d..5808f51f01efc 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java @@ -10,7 +10,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.store.IndexInput; import org.opensearch.common.CheckedTriFunction; import org.opensearch.common.SetOnce; import org.opensearch.common.StreamContext; @@ -19,11 +21,13 @@ import org.opensearch.common.blobstore.transfer.stream.OffsetRangeInputStream; import org.opensearch.common.blobstore.transfer.stream.ResettableCheckedInputStream; import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.common.util.ByteUtils; import java.io.Closeable; import java.io.IOException; import java.io.InputStream; import java.util.Objects; +import java.util.zip.CRC32; import com.jcraft.jzlib.JZlib; @@ -244,4 +248,17 @@ public void close() throws IOException { throw new IOException("Closure of some of the multi-part streams failed."); } } + + /** + * Compute final checksum for IndexInput container checksum footer added by {@link CodecUtil} + * @param indexInput IndexInput with checksum in footer + * @param checksumBytesLength length of checksum bytes + * @return final computed checksum of entire indexInput + */ + public static long checksumOfChecksum(IndexInput indexInput, int checksumBytesLength) throws IOException { + long storedChecksum = CodecUtil.retrieveChecksum(indexInput); + CRC32 checksumOfChecksum = new CRC32(); + 
checksumOfChecksum.update(ByteUtils.toByteArrayBE(storedChecksum)); + return JZlib.crc32_combine(storedChecksum, checksumOfChecksum.getValue(), checksumBytesLength); + } } diff --git a/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStream.java b/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStream.java new file mode 100644 index 0000000000000..b455999bbed0c --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStream.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore.transfer.stream; + +import org.apache.lucene.store.RateLimiter; +import org.opensearch.common.StreamLimiter; + +import java.io.IOException; +import java.util.function.Supplier; + +/** + * Rate Limits an {@link OffsetRangeInputStream} + * + * @opensearch.internal + */ +public class RateLimitingOffsetRangeInputStream extends OffsetRangeInputStream { + + private final StreamLimiter streamLimiter; + + private final OffsetRangeInputStream delegate; + + /** + * The ctor for RateLimitingOffsetRangeInputStream + * @param delegate the underlying {@link OffsetRangeInputStream} + * @param rateLimiterSupplier the supplier for {@link RateLimiter} + * @param listener the listener to be invoked on rate limits + */ + public RateLimitingOffsetRangeInputStream( + OffsetRangeInputStream delegate, + Supplier rateLimiterSupplier, + StreamLimiter.Listener listener + ) { + this.streamLimiter = new StreamLimiter(rateLimiterSupplier, listener); + this.delegate = delegate; + } + + @Override + public int read() throws IOException { + int b = delegate.read(); + streamLimiter.maybePause(1); + return b; + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + int n = delegate.read(b, off, len); + if (n > 0) { + streamLimiter.maybePause(n); + } + return n; + } + + @Override + public synchronized void mark(int readlimit) { + delegate.mark(readlimit); + } + + @Override + public boolean markSupported() { + return delegate.markSupported(); + } + + @Override + public long getFilePointer() throws IOException { + return delegate.getFilePointer(); + } + + @Override + public synchronized void reset() throws IOException { + delegate.reset(); + } + + @Override + public void close() throws IOException { + delegate.close(); + } +} diff --git a/server/src/main/java/org/opensearch/common/breaker/ChildMemoryCircuitBreaker.java b/server/src/main/java/org/opensearch/common/breaker/ChildMemoryCircuitBreaker.java index de4e6ad433c55..c9b498c3ec6fa 100644 --- a/server/src/main/java/org/opensearch/common/breaker/ChildMemoryCircuitBreaker.java +++ b/server/src/main/java/org/opensearch/common/breaker/ChildMemoryCircuitBreaker.java @@ -204,7 +204,7 @@ private long limit(long bytes, String label, double overheadConstant, long memor /** * Add an exact number of bytes, not checking for tripping the * circuit breaker. This bypasses the overheadConstant multiplication. - * + *
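The checksumOfChecksum helper above extends a Lucene footer checksum to cover its own serialized bytes: it CRCs the eight big-endian bytes of the stored checksum, then stitches the two values together with JZlib's crc32_combine. A JDK-only sketch of the first half of that computation (a ByteBuffer standing in for ByteUtils.toByteArrayBE):

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

public class ChecksumOfChecksumSketch {
    public static void main(String[] args) {
        long storedChecksum = 0xCAFEBABEL; // pretend footer checksum
        byte[] bigEndian = ByteBuffer.allocate(Long.BYTES).putLong(storedChecksum).array();
        CRC32 crc = new CRC32();
        crc.update(bigEndian);
        System.out.printf("crc32 over checksum bytes: %08x%n", crc.getValue());
        // JZlib.crc32_combine(storedChecksum, crc.getValue(), checksumBytesLength) would then
        // yield the checksum of (content || footer checksum bytes) without re-reading the content.
    }
}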
<p>
                  * Also does not check with the parent breaker to see if the parent limit * has been exceeded. * diff --git a/server/src/main/java/org/opensearch/common/cache/Cache.java b/server/src/main/java/org/opensearch/common/cache/Cache.java index 0ebef1556424b..0b2b608b55df0 100644 --- a/server/src/main/java/org/opensearch/common/cache/Cache.java +++ b/server/src/main/java/org/opensearch/common/cache/Cache.java @@ -403,7 +403,7 @@ private V get(K key, long now, Consumer> onExpiration) { * If the specified key is not already associated with a value (or is mapped to null), attempts to compute its * value using the given mapping function and enters it into this map unless null. The load method for a given key * will be invoked at most once. - * + *
<p>
                  * Use of different {@link CacheLoader} implementations on the same key concurrently may result in only the first * loader function being called and the second will be returned the result provided by the first including any exceptions * thrown during the execution of the first. diff --git a/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java b/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java index 5ce77cdc75fe5..de4304f0e1fba 100644 --- a/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java +++ b/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java @@ -49,15 +49,15 @@ /** * An immutable map whose writes result in a new copy of the map to be created. - * + *
<p>
                  * This is essentially a hash array mapped trie: inner nodes use a bitmap in * order to map hashes to slots by counting ones. In case of a collision (two * values having the same 32-bits hash), a leaf node is created which stores * and searches for values sequentially. - * + *
<p>
                  * Reads and writes both perform in logarithmic time. Null keys and values are * not supported. - * + *
<p>
                  * This structure might need to perform several object creations per write so * it is better suited for work-loads that are not too write-intensive. * @@ -250,7 +250,7 @@ public static T[] insertElement(final T[] array, final T element, final int * and use a bitmap in order to associate hashes to them. For example, if * an inner node contains 5 values, then 5 bits will be set in the bitmap * and the ordinal of the bit set in this bit map will be the slot number. - * + *
<p>
                  * As a consequence, the number of slots in an inner node is equal to the * number of one bits in the bitmap. * diff --git a/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java b/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java index 0334d367ffdbc..1622457ba27cc 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java +++ b/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java @@ -221,11 +221,11 @@ void validateLinearRing(CoordinateNode coordinates, boolean coerce) { @Override CoordinateNode validate(CoordinateNode coordinates, boolean coerce) { - /** - * Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring) - * A LinearRing is closed LineString with 4 or more positions. The first and last positions - * are equivalent (they represent equivalent points). Though a LinearRing is not explicitly - * represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition. + /* + Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring) + A LinearRing is closed LineString with 4 or more positions. The first and last positions + are equivalent (they represent equivalent points). Though a LinearRing is not explicitly + represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition. */ if (coordinates.children == null || coordinates.children.isEmpty()) { throw new OpenSearchParseException( diff --git a/server/src/main/java/org/opensearch/common/geo/GeoUtils.java b/server/src/main/java/org/opensearch/common/geo/GeoUtils.java index 393c238cb3b2f..8c566c4191e4f 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeoUtils.java +++ b/server/src/main/java/org/opensearch/common/geo/GeoUtils.java @@ -665,7 +665,7 @@ public static GeoPoint parseFromString(String val) { /** * Parse a precision that can be expressed as an integer or a distance measure like "1km", "10m". - * + *
<p>
                  * The precision is expressed as a number between 1 and 12 and indicates the length of geohash * used to represent geo points. * @@ -696,7 +696,7 @@ public static int parsePrecision(XContentParser parser) throws IOException, Open /** * Checks that the precision is within range supported by opensearch - between 1 and 12 - * + *
<p>
                  * Returns the precision value if it is in the range and throws an IllegalArgumentException if it * is outside the range. */ diff --git a/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java b/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java index 56146fc8197be..93c7f4b93679a 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java +++ b/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java @@ -63,7 +63,7 @@ public interface GeometryFormat { /** * Serializes the geometry into a standard Java object. - * + *
<p>
                  * For example, the GeoJson format returns the geometry as a map, while WKT returns a string. */ Object toXContentAsObject(ParsedFormat geometry); diff --git a/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java index 8e65352142e69..5fa70a191e7d7 100644 --- a/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java @@ -177,11 +177,11 @@ public PolygonBuilder close() { } private static void validateLinearRing(LineStringBuilder lineString) { - /** - * Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring) - * A LinearRing is closed LineString with 4 or more positions. The first and last positions - * are equivalent (they represent equivalent points). Though a LinearRing is not explicitly - * represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition. + /* + Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring) + A LinearRing is closed LineString with 4 or more positions. The first and last positions + are equivalent (they represent equivalent points). Though a LinearRing is not explicitly + represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition. */ List points = lineString.coordinates; if (points.size() < 4) { diff --git a/server/src/main/java/org/opensearch/common/geo/parsers/GeoJsonParser.java b/server/src/main/java/org/opensearch/common/geo/parsers/GeoJsonParser.java index 37d42ce600b6d..8d473ae6721d2 100644 --- a/server/src/main/java/org/opensearch/common/geo/parsers/GeoJsonParser.java +++ b/server/src/main/java/org/opensearch/common/geo/parsers/GeoJsonParser.java @@ -52,7 +52,7 @@ /** * Parses shape geometry represented in geojson - * + *
<p>
                  * complies with geojson specification: https://tools.ietf.org/html/rfc7946 * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java index d99d1daf46a2a..b199da0f3691a 100644 --- a/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java +++ b/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java @@ -58,7 +58,7 @@ /** * Parses shape geometry represented in WKT format - * + *
<p>
                  * complies with OGC® document: 12-063r5 and ISO/IEC 13249-3:2016 standard * located at http://docs.opengeospatial.org/is/12-063r5/12-063r5.html * diff --git a/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java b/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java index 8ba0bd7ee1be4..e481ffd460798 100644 --- a/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java +++ b/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java @@ -93,7 +93,7 @@ protected static long fmix(long k) { /** * Compute the hash of the MurmurHash3_x64_128 hashing function. - * + *
<p>
                  * Note, this hashing function might be used to persist hashes, so if the way hashes are computed * changes for some reason, it needs to be addressed (like in BloomFilter and MurmurHashField). */ diff --git a/server/src/main/java/org/opensearch/common/inject/Initializer.java b/server/src/main/java/org/opensearch/common/inject/Initializer.java index e806eba6df707..b88b01c03c018 100644 --- a/server/src/main/java/org/opensearch/common/inject/Initializer.java +++ b/server/src/main/java/org/opensearch/common/inject/Initializer.java @@ -68,9 +68,8 @@ class Initializer { /** * Registers an instance for member injection when that step is performed. * - * @param instance an instance that optionally has members to be injected (each annotated with - * @param source the source location that this injection was requested - * @Inject). + * @param instance an instance that optionally has members to be injected (each annotated with {@code @Inject}). + * @param source the source location that this injection was requested */ public Initializable requestInjection(InjectorImpl injector, T instance, Object source, Set injectionPoints) { Objects.requireNonNull(source); diff --git a/server/src/main/java/org/opensearch/common/inject/Module.java b/server/src/main/java/org/opensearch/common/inject/Module.java index b1fc031192ea0..e66044ff26c40 100644 --- a/server/src/main/java/org/opensearch/common/inject/Module.java +++ b/server/src/main/java/org/opensearch/common/inject/Module.java @@ -29,6 +29,8 @@ package org.opensearch.common.inject; +import org.opensearch.common.annotation.PublicApi; + /** * A module contributes configuration information, typically interface * bindings, which will be used to create an {@link Injector}. A Guice-based @@ -43,8 +45,9 @@ * Use scope and binding annotations on these methods to configure the * bindings. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface Module { /** diff --git a/server/src/main/java/org/opensearch/common/io/stream/Streamables.java b/server/src/main/java/org/opensearch/common/io/stream/Streamables.java index 1e5f8164e8fb9..76e133ecd1599 100644 --- a/server/src/main/java/org/opensearch/common/io/stream/Streamables.java +++ b/server/src/main/java/org/opensearch/common/io/stream/Streamables.java @@ -47,7 +47,7 @@ public static void registerStreamables() { * Registers writers by class type */ private static void registerWriters() { - /** {@link ReadableInstant} */ + /* {@link ReadableInstant} */ WriteableRegistry.registerWriter(ReadableInstant.class, (o, v) -> { o.writeByte((byte) 13); final ReadableInstant instant = (ReadableInstant) v; @@ -55,7 +55,7 @@ private static void registerWriters() { o.writeLong(instant.getMillis()); }); WriteableRegistry.registerClassAlias(ReadableInstant.class, ReadableInstant.class); - /** {@link JodaCompatibleZonedDateTime} */ + /* {@link JodaCompatibleZonedDateTime} */ WriteableRegistry.registerWriter(JodaCompatibleZonedDateTime.class, (o, v) -> { // write the joda compatibility datetime as joda datetime o.writeByte((byte) 13); @@ -65,7 +65,7 @@ private static void registerWriters() { o.writeString(zoneId.equals("Z") ? 
DateTimeZone.UTC.getID() : zoneId); o.writeLong(zonedDateTime.toInstant().toEpochMilli()); }); - /** {@link GeoPoint} */ + /* {@link GeoPoint} */ WriteableRegistry.registerWriter(GeoPoint.class, (o, v) -> { o.writeByte((byte) 22); ((GeoPoint) v).writeTo(o); diff --git a/server/src/main/java/org/opensearch/common/joda/JodaDateFormatter.java b/server/src/main/java/org/opensearch/common/joda/JodaDateFormatter.java index 12d48a0b362ce..bf25e5b1b3923 100644 --- a/server/src/main/java/org/opensearch/common/joda/JodaDateFormatter.java +++ b/server/src/main/java/org/opensearch/common/joda/JodaDateFormatter.java @@ -125,6 +125,11 @@ public String pattern() { return pattern; } + @Override + public String printPattern() { + throw new UnsupportedOperationException("JodaDateFormatter does not have a print pattern"); + } + @Override public Locale locale() { return printer.getLocale(); diff --git a/server/src/main/java/org/opensearch/common/joda/JodaDateMathParser.java b/server/src/main/java/org/opensearch/common/joda/JodaDateMathParser.java index 0de6dec1c25bd..ae38e9a6a8073 100644 --- a/server/src/main/java/org/opensearch/common/joda/JodaDateMathParser.java +++ b/server/src/main/java/org/opensearch/common/joda/JodaDateMathParser.java @@ -46,7 +46,7 @@ /** * A parser for date/time formatted text with optional date math. - * + *
<p>
                  * The format of the datetime is configurable, and unix timestamps can also be used. Datemath * is appended to a datetime with the following syntax: * ||[+-/](\d+)?[yMwdhHms]. diff --git a/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java index e259d5d9e3e33..ed324e4e62d8f 100644 --- a/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java +++ b/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java @@ -47,7 +47,7 @@ * Outputs the Throwable portion of the LoggingEvent as a Json formatted field with array * "exception": [ "stacktrace", "lines", "as", "array", "elements" ] - * + *
<p>
                  * Reusing @link org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter which already converts a Throwable from * LoggingEvent into a multiline string * diff --git a/server/src/main/java/org/opensearch/common/lucene/Lucene.java b/server/src/main/java/org/opensearch/common/lucene/Lucene.java index c61b1bcc676a6..e42c119efb3ee 100644 --- a/server/src/main/java/org/opensearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/opensearch/common/lucene/Lucene.java @@ -77,6 +77,7 @@ import org.apache.lucene.search.Explanation; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.KnnCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.ScoreMode; @@ -1152,14 +1153,10 @@ public ByteVectorValues getByteVectorValues(String field) throws IOException { } @Override - public TopDocs searchNearestVectors(String field, byte[] target, int k, Bits acceptDocs, int visitedLimit) throws IOException { - return null; - } + public void searchNearestVectors(String field, byte[] target, KnnCollector k, Bits acceptDocs) throws IOException {} @Override - public TopDocs searchNearestVectors(String field, float[] target, int k, Bits acceptDocs, int visitedLimit) throws IOException { - return null; - } + public void searchNearestVectors(String field, float[] target, KnnCollector k, Bits acceptDocs) throws IOException {} }; } } diff --git a/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java b/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java index 0ffd633e5a967..17b75ab22f3ed 100644 --- a/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java +++ b/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java @@ -55,7 +55,7 @@ * mappings as segments that were not known before are added and prevents the * structure from growing indefinitely by registering close listeners on these * segments so that at any time it only tracks live segments. - * + *
<p>
                  * NOTE: This is heavy. Avoid using this class unless absolutely required. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/common/metrics/CounterMetric.java b/server/src/main/java/org/opensearch/common/metrics/CounterMetric.java index 5c48c1f772ff0..cb181840406a5 100644 --- a/server/src/main/java/org/opensearch/common/metrics/CounterMetric.java +++ b/server/src/main/java/org/opensearch/common/metrics/CounterMetric.java @@ -62,4 +62,5 @@ public void dec(long n) { public long count() { return counter.sum(); } + } diff --git a/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java b/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java index 33f12c8cb42d3..359facdce633b 100644 --- a/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java +++ b/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java @@ -79,4 +79,5 @@ public void clear() { counter.reset(); sum.reset(); } + } diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java index 3539ea7f3f526..0734659d8ee72 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java @@ -57,6 +57,7 @@ import org.opensearch.plugins.NetworkPlugin; import org.opensearch.tasks.RawTaskStatus; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportInterceptor; @@ -147,7 +148,8 @@ public NetworkModule( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { this.settings = settings; for (NetworkPlugin plugin : plugins) { @@ -160,7 +162,8 @@ public NetworkModule( xContentRegistry, networkService, dispatcher, - clusterSettings + clusterSettings, + tracer ); for (Map.Entry> entry : httpTransportFactory.entrySet()) { registerHttpTransport(entry.getKey(), entry.getValue()); @@ -171,7 +174,8 @@ public NetworkModule( pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, - networkService + networkService, + tracer ); for (Map.Entry> entry : transportFactory.entrySet()) { registerTransport(entry.getKey(), entry.getValue()); diff --git a/server/src/main/java/org/opensearch/common/rounding/DateTimeUnit.java b/server/src/main/java/org/opensearch/common/rounding/DateTimeUnit.java deleted file mode 100644 index 47e182b3caf84..0000000000000 --- a/server/src/main/java/org/opensearch/common/rounding/DateTimeUnit.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.rounding; - -import org.opensearch.OpenSearchException; -import org.opensearch.common.joda.Joda; -import org.joda.time.DateTimeField; -import org.joda.time.DateTimeZone; -import org.joda.time.chrono.ISOChronology; - -import java.util.function.Function; - -/** - * Main date time unit class. - * - * @opensearch.internal - */ -public enum DateTimeUnit { - - WEEK_OF_WEEKYEAR((byte) 1, tz -> ISOChronology.getInstance(tz).weekOfWeekyear()), - YEAR_OF_CENTURY((byte) 2, tz -> ISOChronology.getInstance(tz).yearOfCentury()), - QUARTER((byte) 3, tz -> Joda.QuarterOfYear.getField(ISOChronology.getInstance(tz))), - MONTH_OF_YEAR((byte) 4, tz -> ISOChronology.getInstance(tz).monthOfYear()), - DAY_OF_MONTH((byte) 5, tz -> ISOChronology.getInstance(tz).dayOfMonth()), - HOUR_OF_DAY((byte) 6, tz -> ISOChronology.getInstance(tz).hourOfDay()), - MINUTES_OF_HOUR((byte) 7, tz -> ISOChronology.getInstance(tz).minuteOfHour()), - SECOND_OF_MINUTE((byte) 8, tz -> ISOChronology.getInstance(tz).secondOfMinute()); - - private final byte id; - private final Function fieldFunction; - - DateTimeUnit(byte id, Function fieldFunction) { - this.id = id; - this.fieldFunction = fieldFunction; - } - - public byte id() { - return id; - } - - /** - * @return the {@link DateTimeField} for the provided {@link DateTimeZone} for this time unit - */ - public DateTimeField field(DateTimeZone tz) { - return fieldFunction.apply(tz); - } - - public static DateTimeUnit resolve(byte id) { - switch (id) { - case 1: - return WEEK_OF_WEEKYEAR; - case 2: - return YEAR_OF_CENTURY; - case 3: - return QUARTER; - case 4: - return MONTH_OF_YEAR; - case 5: - return DAY_OF_MONTH; - case 6: - return HOUR_OF_DAY; - case 7: - return MINUTES_OF_HOUR; - case 8: - return SECOND_OF_MINUTE; - default: - throw new OpenSearchException("Unknown date time unit id [" + id + "]"); - } - } -} diff --git a/server/src/main/java/org/opensearch/common/rounding/Rounding.java b/server/src/main/java/org/opensearch/common/rounding/Rounding.java deleted file mode 100644 index 857031bc783f4..0000000000000 --- a/server/src/main/java/org/opensearch/common/rounding/Rounding.java +++ /dev/null @@ -1,459 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.rounding; - -import org.opensearch.OpenSearchException; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; -import org.joda.time.DateTimeField; -import org.joda.time.DateTimeZone; -import org.joda.time.IllegalInstantException; - -import java.io.IOException; -import java.util.Objects; - -/** - * A strategy for rounding long values. - * - * Use the java based Rounding class where applicable - * - * @opensearch.internal - */ -@Deprecated -public abstract class Rounding implements Writeable { - - public abstract byte id(); - - /** - * Rounds the given value. - */ - public abstract long round(long value); - - /** - * Given the rounded value (which was potentially generated by {@link #round(long)}, returns the next rounding value. For example, with - * interval based rounding, if the interval is 3, {@code nextRoundValue(6) = 9 }. - * - * @param value The current rounding value - * @return The next rounding value; - */ - public abstract long nextRoundingValue(long value); - - @Override - public abstract boolean equals(Object obj); - - @Override - public abstract int hashCode(); - - public static Builder builder(DateTimeUnit unit) { - return new Builder(unit); - } - - public static Builder builder(TimeValue interval) { - return new Builder(interval); - } - - /** - * Builder for rounding - * - * @opensearch.internal - */ - public static class Builder { - - private final DateTimeUnit unit; - private final long interval; - - private DateTimeZone timeZone = DateTimeZone.UTC; - - public Builder(DateTimeUnit unit) { - this.unit = unit; - this.interval = -1; - } - - public Builder(TimeValue interval) { - this.unit = null; - if (interval.millis() < 1) throw new IllegalArgumentException("Zero or negative time interval not supported"); - this.interval = interval.millis(); - } - - public Builder timeZone(DateTimeZone timeZone) { - if (timeZone == null) { - throw new IllegalArgumentException("Setting null as timezone is not supported"); - } - this.timeZone = timeZone; - return this; - } - - public Rounding build() { - Rounding timeZoneRounding; - if (unit != null) { - timeZoneRounding = new TimeUnitRounding(unit, timeZone); - } else { - timeZoneRounding = new TimeIntervalRounding(interval, timeZone); - } - return timeZoneRounding; - } - } - - /** - * Rounding time units - * - * @opensearch.internal - */ - static class TimeUnitRounding extends Rounding { - - static final byte ID = 1; - - private final DateTimeUnit unit; - private final DateTimeField field; - private final DateTimeZone timeZone; - private final boolean unitRoundsToMidnight; - - TimeUnitRounding(DateTimeUnit unit, DateTimeZone timeZone) { - this.unit = unit; - this.field = unit.field(timeZone); - unitRoundsToMidnight = this.field.getDurationField().getUnitMillis() > 60L * 60L * 1000L; - this.timeZone = timeZone; - } - - 
TimeUnitRounding(StreamInput in) throws IOException { - unit = DateTimeUnit.resolve(in.readByte()); - timeZone = DateTimeZone.forID(in.readString()); - field = unit.field(timeZone); - unitRoundsToMidnight = field.getDurationField().getUnitMillis() > 60L * 60L * 1000L; - } - - @Override - public byte id() { - return ID; - } - - /** - * @return The latest timestamp T which is strictly before utcMillis - * and such that timeZone.getOffset(T) != timeZone.getOffset(utcMillis). - * If there is no such T, returns Long.MAX_VALUE. - */ - private long previousTransition(long utcMillis) { - final int offsetAtInputTime = timeZone.getOffset(utcMillis); - do { - // Some timezones have transitions that do not change the offset, so we have to - // repeatedly call previousTransition until a nontrivial transition is found. - - long previousTransition = timeZone.previousTransition(utcMillis); - if (previousTransition == utcMillis) { - // There are no earlier transitions - return Long.MAX_VALUE; - } - assert previousTransition < utcMillis; // Progress was made - utcMillis = previousTransition; - } while (timeZone.getOffset(utcMillis) == offsetAtInputTime); - - return utcMillis; - } - - @Override - public long round(long utcMillis) { - - // field.roundFloor() works as long as the offset doesn't change. It is worth getting this case out of the way first, as - // the calculations for fixing things near to offset changes are a little expensive and are unnecessary in the common case - // of working in UTC. - if (timeZone.isFixed()) { - return field.roundFloor(utcMillis); - } - - // When rounding to hours we consider any local time of the form 'xx:00:00' as rounded, even though this gives duplicate - // bucket names for the times when the clocks go back. Shorter units behave similarly. However, longer units round down to - // midnight, and on the days where there are two midnights we would rather pick the earlier one, so that buckets are - // uniquely identified by the date. - if (unitRoundsToMidnight) { - final long anyLocalStartOfDay = field.roundFloor(utcMillis); - // `anyLocalStartOfDay` is _supposed_ to be the Unix timestamp for the start of the day in question in the current time - // zone. Mostly this just means "midnight", which is fine, and on days with no local midnight it's the first time that - // does occur on that day which is also ok. However, on days with >1 local midnight this is _one_ of the midnights, but - // may not be the first. Check whether this is happening, and fix it if so. - - final long previousTransition = previousTransition(anyLocalStartOfDay); - - if (previousTransition == Long.MAX_VALUE) { - // No previous transitions, so there can't be another earlier local midnight. - return anyLocalStartOfDay; - } - - final long currentOffset = timeZone.getOffset(anyLocalStartOfDay); - final long previousOffset = timeZone.getOffset(previousTransition); - assert currentOffset != previousOffset; - - // NB we only assume interference from one previous transition. It's theoretically possible to have two transitions in - // quick succession, both of which have a midnight in them, but this doesn't appear to happen in the TZDB so (a) it's - // pointless to implement and (b) it won't be tested. I recognise that this comment is tempting fate and will likely - // cause this very situation to occur in the near future, and eagerly look forward to fixing this using a loop over - // previous transitions when it happens. 
- - final long alsoLocalStartOfDay = anyLocalStartOfDay + currentOffset - previousOffset; - // `alsoLocalStartOfDay` is the Unix timestamp for the start of the day in question if the previous offset were in - // effect. - - if (alsoLocalStartOfDay <= previousTransition) { - // Therefore the previous offset _is_ in effect at `alsoLocalStartOfDay`, and it's earlier than anyLocalStartOfDay, - // so this is the answer to use. - return alsoLocalStartOfDay; - } else { - // The previous offset is not in effect at `alsoLocalStartOfDay`, so the current offset must be. - return anyLocalStartOfDay; - } - - } else { - do { - long rounded = field.roundFloor(utcMillis); - - // field.roundFloor() mostly works as long as the offset hasn't changed in [rounded, utcMillis], so look at where - // the offset most recently changed. - - final long previousTransition = previousTransition(utcMillis); - - if (previousTransition == Long.MAX_VALUE || previousTransition < rounded) { - // The offset did not change in [rounded, utcMillis], so roundFloor() worked as expected. - return rounded; - } - - // The offset _did_ change in [rounded, utcMillis]. Put differently, this means that none of the times in - // [previousTransition+1, utcMillis] were rounded, so the rounded time must be <= previousTransition. This means - // it's sufficient to try and round previousTransition down. - assert previousTransition < utcMillis; - utcMillis = previousTransition; - } while (true); - } - } - - @Override - public long nextRoundingValue(long utcMillis) { - long floor = round(utcMillis); - // add one unit and round to get to next rounded value - long next = round(field.add(floor, 1)); - if (next == floor) { - // in rare case we need to add more than one unit - next = round(field.add(floor, 2)); - } - return next; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeByte(unit.id()); - out.writeString(timeZone.getID()); - } - - @Override - public int hashCode() { - return Objects.hash(unit, timeZone); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - TimeUnitRounding other = (TimeUnitRounding) obj; - return Objects.equals(unit, other.unit) && Objects.equals(timeZone, other.timeZone); - } - - @Override - public String toString() { - return "[" + timeZone + "][" + unit + "]"; - } - } - - /** - * Rounding time intervals - * - * @opensearch.internal - */ - static class TimeIntervalRounding extends Rounding { - - static final byte ID = 2; - - private final long interval; - private final DateTimeZone timeZone; - - TimeIntervalRounding(long interval, DateTimeZone timeZone) { - if (interval < 1) throw new IllegalArgumentException("Zero or negative time interval not supported"); - this.interval = interval; - this.timeZone = timeZone; - } - - TimeIntervalRounding(StreamInput in) throws IOException { - interval = in.readVLong(); - timeZone = DateTimeZone.forID(in.readString()); - } - - @Override - public byte id() { - return ID; - } - - @Override - public long round(long utcMillis) { - long timeLocal = timeZone.convertUTCToLocal(utcMillis); - long rounded = roundKey(timeLocal, interval) * interval; - long roundedUTC; - if (isInDSTGap(rounded) == false) { - roundedUTC = timeZone.convertLocalToUTC(rounded, true, utcMillis); - // check if we crossed DST transition, in this case we want the - // last rounded value before the transition - long transition = timeZone.previousTransition(utcMillis); - if 
(transition != utcMillis && transition > roundedUTC) { - roundedUTC = round(transition - 1); - } - } else { - /* - * Edge case where the rounded local time is illegal and landed - * in a DST gap. In this case, we choose 1ms tick after the - * transition date. We don't want the transition date itself - * because those dates, when rounded themselves, fall into the - * previous interval. This would violate the invariant that the - * rounding operation should be idempotent. - */ - roundedUTC = timeZone.previousTransition(utcMillis) + 1; - } - return roundedUTC; - } - - private static long roundKey(long value, long interval) { - if (value < 0) { - return (value - interval + 1) / interval; - } else { - return value / interval; - } - } - - /** - * Determine whether the local instant is a valid instant in the given - * time zone. The logic for this is taken from - * {@link DateTimeZone#convertLocalToUTC(long, boolean)} for the - * `strict` mode case, but instead of throwing an - * {@link IllegalInstantException}, which is costly, we want to return a - * flag indicating that the value is illegal in that time zone. - */ - private boolean isInDSTGap(long instantLocal) { - if (timeZone.isFixed()) { - return false; - } - // get the offset at instantLocal (first estimate) - int offsetLocal = timeZone.getOffset(instantLocal); - // adjust instantLocal using the estimate and recalc the offset - int offset = timeZone.getOffset(instantLocal - offsetLocal); - // if the offsets differ, we must be near a DST boundary - if (offsetLocal != offset) { - // determine if we are in the DST gap - long nextLocal = timeZone.nextTransition(instantLocal - offsetLocal); - if (nextLocal == (instantLocal - offsetLocal)) { - nextLocal = Long.MAX_VALUE; - } - long nextAdjusted = timeZone.nextTransition(instantLocal - offset); - if (nextAdjusted == (instantLocal - offset)) { - nextAdjusted = Long.MAX_VALUE; - } - if (nextLocal != nextAdjusted) { - // we are in the DST gap - return true; - } - } - return false; - } - - @Override - public long nextRoundingValue(long time) { - long timeLocal = time; - timeLocal = timeZone.convertUTCToLocal(time); - long next = timeLocal + interval; - return timeZone.convertLocalToUTC(next, false); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(interval); - out.writeString(timeZone.getID()); - } - - @Override - public int hashCode() { - return Objects.hash(interval, timeZone); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - TimeIntervalRounding other = (TimeIntervalRounding) obj; - return Objects.equals(interval, other.interval) && Objects.equals(timeZone, other.timeZone); - } - } - - /** - * Rounding streams - * - * @opensearch.internal - */ - public static class Streams { - - public static void write(Rounding rounding, StreamOutput out) throws IOException { - out.writeByte(rounding.id()); - rounding.writeTo(out); - } - - public static Rounding read(StreamInput in) throws IOException { - Rounding rounding; - byte id = in.readByte(); - switch (id) { - case TimeUnitRounding.ID: - rounding = new TimeUnitRounding(in); - break; - case TimeIntervalRounding.ID: - rounding = new TimeIntervalRounding(in); - break; - default: - throw new OpenSearchException("unknown rounding id [" + id + "]"); - } - return rounding; - } - - } - -} diff --git a/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java 
b/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java index 8a19d309975df..4b23c0e3808a7 100644 --- a/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java @@ -256,7 +256,7 @@ public synchronized void addSettingsUpdateConsumer(Setting setting, Consu /** * Adds a settings consumer that is only executed if any setting in the supplied list of settings is changed. In that case all the * settings are specified in the argument are returned. - * + *
<p>
                  * Also automatically adds empty consumers for all settings in order to activate logging */ public synchronized void addSettingsUpdateConsumer(Consumer consumer, List> settings) { @@ -267,7 +267,7 @@ public synchronized void addSettingsUpdateConsumer(Consumer consumer, * Adds a settings consumer that is only executed if any setting in the supplied list of settings is changed. In that case all the * settings are specified in the argument are returned. The validator is run across all specified settings before the settings are * applied. - * + *
<p>
                  * Also automatically adds empty consumers for all settings in order to activate logging */ public synchronized void addSettingsUpdateConsumer( diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 998722c2a47e7..0ea3008347ce7 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -79,6 +79,7 @@ import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.Loggers; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.network.NetworkService; @@ -97,6 +98,7 @@ import org.opensearch.gateway.DanglingIndicesState; import org.opensearch.gateway.GatewayService; import org.opensearch.gateway.PersistedClusterStateService; +import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.http.HttpTransportSettings; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; @@ -106,6 +108,7 @@ import org.opensearch.index.ShardIndexingPressureSettings; import org.opensearch.index.ShardIndexingPressureStore; import org.opensearch.index.remote.RemoteStorePressureSettings; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.indices.IndexingMemoryController; import org.opensearch.indices.IndicesQueryCache; @@ -127,13 +130,14 @@ import org.opensearch.node.Node; import org.opensearch.node.Node.DiscoverySettings; import org.opensearch.node.NodeRoleSettings; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.node.resource.tracker.ResourceTrackerSettings; import org.opensearch.persistent.PersistentTasksClusterService; import org.opensearch.persistent.decider.EnableAssignmentDecider; import org.opensearch.plugins.PluginsService; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.rest.BaseRestHandler; import org.opensearch.script.ScriptService; -import org.opensearch.search.SearchBootstrapSettings; import org.opensearch.search.SearchModule; import org.opensearch.search.SearchService; import org.opensearch.search.aggregations.MultiBucketConsumerService; @@ -168,8 +172,9 @@ /** * Encapsulates all valid cluster level settings. 
* - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ClusterSettings extends AbstractScopedSettings { public ClusterSettings(final Settings nodeSettings, final Set> settingsSet) { @@ -262,6 +267,8 @@ public void apply(Settings value, Settings current, Settings previous) { IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING, IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING, IndicesQueryCache.INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING, + IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING, + IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING, IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING, IndicesService.WRITE_DANGLING_INDICES_INFO_SETTING, IndicesService.CLUSTER_REPLICATION_TYPE_SETTING, @@ -282,6 +289,7 @@ public void apply(Settings value, Settings current, Settings previous) { RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING, + RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, @@ -368,6 +376,9 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, TransportSearchAction.SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING, + TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED, + TransportSearchAction.SEARCH_PHASE_TOOK_ENABLED, + TransportSearchAction.SEARCH_QUERY_METRICS_ENABLED_SETTING, RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE, SniffConnectionStrategy.REMOTE_CONNECTIONS_PER_CLUSTER, RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, @@ -451,6 +462,7 @@ public void apply(Settings value, Settings current, Settings previous) { NetworkService.TCP_CONNECT_TIMEOUT, IndexSettings.QUERY_STRING_ANALYZE_WILDCARD, IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD, + IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY, ScriptService.SCRIPT_GENERAL_CACHE_SIZE_SETTING, ScriptService.SCRIPT_GENERAL_CACHE_EXPIRE_SETTING, ScriptService.SCRIPT_GENERAL_MAX_COMPILATIONS_RATE_SETTING, @@ -643,9 +655,14 @@ public void apply(Settings value, Settings current, Settings previous) { SearchBackpressureSettings.SETTING_CANCELLATION_BURST, // deprecated SegmentReplicationPressureService.SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED, SegmentReplicationPressureService.MAX_INDEXING_CHECKPOINTS, - SegmentReplicationPressureService.MAX_REPLICATION_TIME_SETTING, + SegmentReplicationPressureService.MAX_REPLICATION_TIME_BACKPRESSURE_SETTING, + SegmentReplicationPressureService.MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING, SegmentReplicationPressureService.MAX_ALLOWED_STALE_SHARDS, + // Settings related to resource trackers + ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING, + ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING, + // Settings related to Searchable Snapshots Node.NODE_SEARCH_CACHE_SIZE_SETTING, FileCache.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING, @@ -655,13 +672,23 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStorePressureSettings.BYTES_LAG_VARIANCE_FACTOR, 
RemoteStorePressureSettings.UPLOAD_TIME_LAG_VARIANCE_FACTOR, RemoteStorePressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT, - RemoteStorePressureSettings.UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE, - RemoteStorePressureSettings.UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE, - RemoteStorePressureSettings.UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE, + + // Settings related to Remote Store stats + RemoteStoreStatsTrackerFactory.MOVING_AVERAGE_WINDOW_SIZE, // Related to monitoring of task cancellation TaskCancellationMonitoringSettings.IS_ENABLED_SETTING, - TaskCancellationMonitoringSettings.DURATION_MILLIS_SETTING + TaskCancellationMonitoringSettings.DURATION_MILLIS_SETTING, + + // Remote cluster state settings + RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING, + RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, + RemoteClusterStateService.GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, + RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, + RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, + IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, + IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, + IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING ) ) ); @@ -674,18 +701,18 @@ public void apply(Settings value, Settings current, Settings previous) { * setting should be moved to {@link #BUILT_IN_CLUSTER_SETTINGS}. */ public static final Map, List> FEATURE_FLAGGED_CLUSTER_SETTINGS = Map.of( - List.of(FeatureFlags.REMOTE_STORE), - List.of( - IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING, - IndicesService.CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING, - IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING - ), List.of(FeatureFlags.CONCURRENT_SEGMENT_SEARCH), List.of( SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING, - SearchBootstrapSettings.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING + SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING ), List.of(FeatureFlags.TELEMETRY), - List.of(TelemetrySettings.TRACER_ENABLED_SETTING) + List.of( + TelemetrySettings.TRACER_ENABLED_SETTING, + TelemetrySettings.TRACER_SAMPLER_PROBABILITY, + TelemetrySettings.METRICS_PUBLISH_INTERVAL_SETTING, + TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING, + TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING + ) ); } diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index e109d2a871cef..387b0c9753574 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -36,11 +36,11 @@ protected FeatureFlagSettings( new HashSet<>( Arrays.asList( FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING, - FeatureFlags.REMOTE_STORE_SETTING, FeatureFlags.EXTENSIONS_SETTING, FeatureFlags.IDENTITY_SETTING, FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING, - FeatureFlags.TELEMETRY_SETTING + FeatureFlags.TELEMETRY_SETTING, + FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index f14db4354f196..62e8faf33e1fa 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -45,9 +45,11 @@ import 
org.opensearch.index.IndexSettings; import org.opensearch.index.IndexSortConfig; import org.opensearch.index.IndexingSlowLog; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.LogByteSizeMergePolicyProvider; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.MergeSchedulerConfig; import org.opensearch.index.SearchSlowLog; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.cache.bitset.BitsetFilterCache; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.fielddata.IndexFieldDataService; @@ -76,6 +78,8 @@ public final class IndexScopedSettings extends AbstractScopedSettings { public static final Predicate INDEX_SETTINGS_KEY_PREDICATE = (s) -> s.startsWith(IndexMetadata.INDEX_SETTING_PREFIX); + public static final Predicate ARCHIVED_SETTINGS_KEY_PREDICATE = (s) -> s.startsWith(ARCHIVED_SETTINGS_PREFIX); + public static final Set> BUILT_IN_INDEX_SETTINGS = Collections.unmodifiableSet( new HashSet<>( Arrays.asList( @@ -118,14 +122,14 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING, IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING, - MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, - MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, + TieredMergePolicyProvider.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING, IndexSortConfig.INDEX_SORT_FIELD_SETTING, IndexSortConfig.INDEX_SORT_ORDER_SETTING, IndexSortConfig.INDEX_SORT_MISSING_SETTING, @@ -169,6 +173,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING, IndexSettings.INDEX_SEARCH_IDLE_AFTER, IndexSettings.INDEX_SEARCH_THROTTLED, + IndexSettings.INDEX_UNREFERENCED_FILE_CLEANUP, IndexFieldDataService.INDEX_FIELDDATA_CACHE_KEY, FieldMapper.IGNORE_MALFORMED_SETTING, FieldMapper.COERCE_SETTING, @@ -199,6 +204,13 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_MERGE_ON_FLUSH_ENABLED, IndexSettings.INDEX_MERGE_ON_FLUSH_MAX_FULL_FLUSH_MERGE_WAIT_TIME, IndexSettings.INDEX_MERGE_ON_FLUSH_POLICY, + IndexSettings.INDEX_MERGE_POLICY, + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING, + 
LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING, + LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING, IndexSettings.DEFAULT_SEARCH_PIPELINE, // Settings for Searchable Snapshots @@ -209,6 +221,12 @@ public final class IndexScopedSettings extends AbstractScopedSettings { // Settings for remote translog IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, + IndexSettings.INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING, + + // Settings for remote store enablement + IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, + IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING, + IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING, // validate that built-in similarities don't get redefined Setting.groupSetting("index.similarity.", (s) -> { @@ -233,12 +251,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings { * setting should be moved to {@link #BUILT_IN_INDEX_SETTINGS}. */ public static final Map> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( - FeatureFlags.REMOTE_STORE, - List.of( - IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, - IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING, - IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING - ), FeatureFlags.CONCURRENT_SEGMENT_SEARCH, List.of(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING) ); @@ -273,7 +285,7 @@ public boolean isPrivateSetting(String key) { case IndexMetadata.SETTING_HISTORY_UUID: case IndexMetadata.SETTING_VERSION_UPGRADED: case IndexMetadata.SETTING_INDEX_PROVIDED_NAME: - case MergePolicyConfig.INDEX_MERGE_ENABLED: + case MergePolicyProvider.INDEX_MERGE_ENABLED: // we keep the shrink settings for BWC - this can be removed in 8.0 // we can't remove in 7 since this setting might be baked into an index coming in via a full cluster restart from 6.0 case "index.shrink.source.uuid": diff --git a/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java index f25dd872fc703..1ad3b7ab8875a 100644 --- a/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java @@ -88,7 +88,7 @@ /** * A disk based container for sensitive settings in OpenSearch. - * + *
<p>
                  * Loading a keystore has 2 phases. First, call {@link #load(Path)}. Then call * {@link #decrypt(char[])} with the keystore password, or an empty char array if * {@link #hasPassword()} is {@code false}. Loading and decrypting should happen @@ -147,7 +147,7 @@ private static class Entry { /** * The number of bits for the cipher key. - * + *
<p>
                  * Note: The Oracle JDK 8 ships with a limited JCE policy that restricts key length for AES to 128 bits. * This can be increased to 256 bits once minimum java 9 is the minimum java version. * See http://www.oracle.com/technetwork/java/javase/terms/readme/jdk9-readme-3852447.html#jce @@ -234,7 +234,7 @@ public static KeyStoreWrapper load(Path configDir) throws IOException { /** * Loads information about the OpenSearch keystore from the provided config directory. - * + *
<p>
                  * {@link #decrypt(char[])} must be called before reading or writing any entries. * Returns {@code null} if no keystore exists. */ @@ -358,7 +358,7 @@ private Cipher createCipher(int opmode, char[] password, byte[] salt, byte[] iv) /** * Decrypts the underlying keystore data. - * + *
<p>
                  * This may only be called once. */ public void decrypt(char[] password) throws GeneralSecurityException, IOException { diff --git a/server/src/main/java/org/opensearch/common/settings/SecureSetting.java b/server/src/main/java/org/opensearch/common/settings/SecureSetting.java index f2ccc01a4c7e6..1855270b016b3 100644 --- a/server/src/main/java/org/opensearch/common/settings/SecureSetting.java +++ b/server/src/main/java/org/opensearch/common/settings/SecureSetting.java @@ -45,7 +45,7 @@ /** * A secure setting. - * + *
<p>
                  * This class allows access to settings from the OpenSearch keystore. * * @opensearch.internal @@ -152,7 +152,7 @@ public void diff(Settings.Builder builder, Settings source, Settings defaultSett /** * A setting which contains a sensitive string. - * + *
<p>
                  * This may be any sensitive string, e.g. a username, a password, an auth token, etc. */ public static Setting secureString(String name, Setting fallback, Property... properties) { @@ -179,7 +179,7 @@ public static Setting insecureString(String name, String secureNam /** * A setting which contains a file. Reading the setting opens an input stream to the file. - * + *
<p>
                  * This may be any sensitive file, e.g. a set of credentials normally in plaintext. */ public static Setting secureFile(String name, Setting fallback, Property... properties) { diff --git a/server/src/main/java/org/opensearch/common/settings/SecureSettings.java b/server/src/main/java/org/opensearch/common/settings/SecureSettings.java index 2fe7d4834c92a..3732478243dab 100644 --- a/server/src/main/java/org/opensearch/common/settings/SecureSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/SecureSettings.java @@ -32,6 +32,7 @@ package org.opensearch.common.settings; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.settings.SecureString; import java.io.Closeable; @@ -43,8 +44,9 @@ /** * An accessor for settings which are securely stored. See {@link SecureSetting}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface SecureSettings extends Closeable { /** Returns true iff the settings are loaded and retrievable. */ diff --git a/server/src/main/java/org/opensearch/common/settings/Setting.java b/server/src/main/java/org/opensearch/common/settings/Setting.java index c43e0f26f9138..0e96edff0681c 100644 --- a/server/src/main/java/org/opensearch/common/settings/Setting.java +++ b/server/src/main/java/org/opensearch/common/settings/Setting.java @@ -38,6 +38,7 @@ import org.opensearch.Version; import org.opensearch.common.Booleans; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.regex.Regex; import org.opensearch.common.unit.MemorySizeValue; @@ -102,15 +103,17 @@ * } *
</pre>
          * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class Setting implements ToXContentObject { /** * Property of the setting * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public enum Property { /** * should be filtered in some api (mask password/credentials) @@ -635,8 +638,9 @@ public Setting getConcreteSetting(String key) { * Allows a setting to declare a dependency on another setting being set. Optionally, a setting can validate the value of the dependent * setting. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface SettingDependency { /** @@ -784,8 +788,9 @@ public String toString() { /** * Allows an affix setting to declare a dependency on another affix setting. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface AffixSettingDependency extends SettingDependency { @Override @@ -796,8 +801,9 @@ public interface AffixSettingDependency extends SettingDependency { /** * An affix setting * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class AffixSetting extends Setting { private final AffixKey key; private final BiFunction> delegateFactory; @@ -1026,9 +1032,10 @@ public Map getAsMap(Settings settings) { * * @param the type of the {@link Setting} * - * @opensearch.internal + * @opensearch.api */ @FunctionalInterface + @PublicApi(since = "1.0.0") public interface Validator { /** @@ -2834,8 +2841,9 @@ private static AffixSetting affixKeySetting( /** * Key for the setting * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public interface Key { boolean match(String key); } @@ -2843,8 +2851,9 @@ public interface Key { /** * A simple key for a setting * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class SimpleKey implements Key { protected final String key; @@ -2918,8 +2927,9 @@ public boolean match(String toTest) { * A key that allows for static pre and suffix. This is used for settings * that have dynamic namespaces like for different accounts etc. 
* - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static final class AffixKey implements Key { private final Pattern pattern; private final String prefix; diff --git a/server/src/main/java/org/opensearch/common/settings/SettingUpgrader.java b/server/src/main/java/org/opensearch/common/settings/SettingUpgrader.java index 1dabf020d8398..dac0b9b867768 100644 --- a/server/src/main/java/org/opensearch/common/settings/SettingUpgrader.java +++ b/server/src/main/java/org/opensearch/common/settings/SettingUpgrader.java @@ -32,6 +32,8 @@ package org.opensearch.common.settings; +import org.opensearch.common.annotation.PublicApi; + import java.util.List; /** @@ -39,8 +41,9 @@ * * @param the type of the underlying setting * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface SettingUpgrader { /** diff --git a/server/src/main/java/org/opensearch/common/settings/Settings.java b/server/src/main/java/org/opensearch/common/settings/Settings.java index 75cbbc9fe438e..c5ef4972d087d 100644 --- a/server/src/main/java/org/opensearch/common/settings/Settings.java +++ b/server/src/main/java/org/opensearch/common/settings/Settings.java @@ -38,6 +38,7 @@ import org.opensearch.Version; import org.opensearch.common.Booleans; import org.opensearch.common.SetOnce; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.logging.LogConfigurator; import org.opensearch.common.unit.MemorySizeValue; @@ -88,17 +89,19 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.opensearch.common.settings.AbstractScopedSettings.ARCHIVED_SETTINGS_PREFIX; import static org.opensearch.common.unit.TimeValue.parseTimeValue; import static org.opensearch.core.common.unit.ByteSizeValue.parseBytesSizeValue; /** * An immutable settings implementation. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Settings implements ToXContentFragment { - public static final Settings EMPTY = new Builder().build(); + public static final Settings EMPTY = new Settings(Collections.emptyMap(), null); /** The raw settings from the full key to raw string value. */ private final Map settings; @@ -749,11 +752,12 @@ public Set keySet() { * settings implementation. Use {@link Settings#builder()} in order to * construct it. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class Builder { - public static final Settings EMPTY_SETTINGS = new Builder().build(); + public static final Settings EMPTY_SETTINGS = Settings.EMPTY; // we use a sorted map for consistent serialization when using getAsMap() private final Map map = new TreeMap<>(); @@ -1217,8 +1221,8 @@ public boolean shouldRemoveMissingPlaceholder(String placeholderName) { } /** - * Checks that all settings in the builder start with the specified prefix. - * + * Checks that all settings(except archived settings and wildcards) in the builder start with the specified prefix. + *
<p>
          * If a setting doesn't start with the prefix, the builder appends the prefix to such setting. */ public Builder normalizePrefix(String prefix) { @@ -1227,7 +1231,7 @@ public Builder normalizePrefix(String prefix) { while (iterator.hasNext()) { Map.Entry entry = iterator.next(); String key = entry.getKey(); - if (key.startsWith(prefix) == false && key.endsWith("*") == false) { + if (key.startsWith(prefix) == false && key.endsWith("*") == false && key.startsWith(ARCHIVED_SETTINGS_PREFIX) == false) { replacements.put(prefix + key, entry.getValue()); iterator.remove(); } diff --git a/server/src/main/java/org/opensearch/common/settings/SettingsException.java b/server/src/main/java/org/opensearch/common/settings/SettingsException.java index 717d0f66de5d9..f250d19f79d4a 100644 --- a/server/src/main/java/org/opensearch/common/settings/SettingsException.java +++ b/server/src/main/java/org/opensearch/common/settings/SettingsException.java @@ -33,6 +33,7 @@ package org.opensearch.common.settings; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import java.io.IOException; @@ -40,8 +41,9 @@ /** * A generic failure to handle settings. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class SettingsException extends OpenSearchException { public SettingsException(String message) { diff --git a/server/src/main/java/org/opensearch/common/time/DateFormatter.java b/server/src/main/java/org/opensearch/common/time/DateFormatter.java index d57fd441b9bf4..c98bd853dfced 100644 --- a/server/src/main/java/org/opensearch/common/time/DateFormatter.java +++ b/server/src/main/java/org/opensearch/common/time/DateFormatter.java @@ -126,6 +126,14 @@ default String formatJoda(DateTime dateTime) { */ String pattern(); + /** + * A name based format for this formatter. 
Can be one of the registered formatters like epoch_millis or + * a configured format like HH:mm:ss + * + * @return The name of this formatter + */ + String printPattern(); + /** * Returns the configured locale of the date formatter * @@ -147,7 +155,7 @@ default String formatJoda(DateTime dateTime) { */ DateMathParser toDateMathParser(); - static DateFormatter forPattern(String input) { + static DateFormatter forPattern(String input, String printPattern, Boolean canCacheFormatter) { if (Strings.hasLength(input) == false) { throw new IllegalArgumentException("No date pattern provided"); @@ -158,7 +166,28 @@ static DateFormatter forPattern(String input) { List patterns = splitCombinedPatterns(format); List formatters = patterns.stream().map(DateFormatters::forPattern).collect(Collectors.toList()); - return JavaDateFormatter.combined(input, formatters); + DateFormatter printFormatter = formatters.get(0); + if (Strings.hasLength(printPattern)) { + String printFormat = strip8Prefix(printPattern); + try { + printFormatter = DateFormatters.forPattern(printFormat); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Invalid print format: " + e.getMessage(), e); + } + } + return JavaDateFormatter.combined(input, formatters, printFormatter, canCacheFormatter); + } + + static DateFormatter forPattern(String input) { + return forPattern(input, null, false); + } + + static DateFormatter forPattern(String input, String printPattern) { + return forPattern(input, printPattern, false); + } + + static DateFormatter forPattern(String input, Boolean canCacheFormatter) { + return forPattern(input, null, canCacheFormatter); } static String strip8Prefix(String input) { diff --git a/server/src/main/java/org/opensearch/common/time/DateFormatters.java b/server/src/main/java/org/opensearch/common/time/DateFormatters.java index 6c8b9282d8a77..e74ab687b903b 100644 --- a/server/src/main/java/org/opensearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/opensearch/common/time/DateFormatters.java @@ -2172,10 +2172,10 @@ static DateFormatter forPattern(String input) { * or Instant.from(accessor). This results in a huge performance penalty and should be prevented * This method prevents exceptions by querying the accessor for certain capabilities * and then act on it accordingly - * + *
<p>
          * This action assumes that we can reliably fall back to some defaults if not all parts of a * zoned date time are set - * + *
<p>
          * - If a zoned date time is passed, it is returned * - If no timezone is found, ZoneOffset.UTC is used * - If we find a time and a date, converting to a ZonedDateTime is straight forward, diff --git a/server/src/main/java/org/opensearch/common/time/DateMathParser.java b/server/src/main/java/org/opensearch/common/time/DateMathParser.java index f6573eaa90286..7088d6cb7a498 100644 --- a/server/src/main/java/org/opensearch/common/time/DateMathParser.java +++ b/server/src/main/java/org/opensearch/common/time/DateMathParser.java @@ -64,12 +64,12 @@ default Instant parse(String text, LongSupplier now, boolean roundUpProperty, Da /** * Parse text, that potentially contains date math into the milliseconds since the epoch - * + *
<p>
          * Examples are - * + *
<p>
          * 2014-11-18||-2y subtracts two years from the input date * now/m rounds the current time to minute granularity - * + *
<p>
* Supported rounding units are * y year * M month diff --git a/server/src/main/java/org/opensearch/common/time/DateUtils.java b/server/src/main/java/org/opensearch/common/time/DateUtils.java index 021b8a3be8b23..7ab395a1117e7 100644 --- a/server/src/main/java/org/opensearch/common/time/DateUtils.java +++ b/server/src/main/java/org/opensearch/common/time/DateUtils.java @@ -342,7 +342,7 @@ public static long toMilliSeconds(long nanoSecondsSinceEpoch) { /** * Rounds the given utc milliseconds since the epoch down to the next unit millis - * + *
<p>
* Note: This does not check for correctness of the result, as this only works with units smaller than or equal to a day * In order to ensure the performance of this method, there are no guards or checks in it * diff --git a/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java b/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java index f3459a5857b9e..7fc39e063efb5 100644 --- a/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java +++ b/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java @@ -32,12 +32,12 @@ * This class has been copied from different locations within the joda time package, as * these methods are fast when used for rounding, as they do not require conversion to java * time objects - * + *
<p>
          * This code has been copied from jodatime 2.10.1 * The source can be found at https://github.com/JodaOrg/joda-time/tree/v2.10.1 - * + *
<p>
          * See following methods have been copied (along with required helper variables) - * + *
<p>
          * - org.joda.time.chrono.GregorianChronology.calculateFirstDayOfYearMillis(int year) * - org.joda.time.chrono.BasicChronology.getYear(int year) * - org.joda.time.chrono.BasicGJChronology.getMonthOfYear(long utcMillis, int year) diff --git a/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java index 07013a3dc75f2..89eb19fdc915e 100644 --- a/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java @@ -32,6 +32,7 @@ package org.opensearch.common.time; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import java.text.ParsePosition; @@ -67,9 +68,12 @@ class JavaDateFormatter implements DateFormatter { } private final String format; + private final String printFormat; private final DateTimeFormatter printer; private final List parsers; private final JavaDateFormatter roundupParser; + private final Boolean canCacheLastParsedFormatter; + private volatile DateTimeFormatter lastParsedformatter = null; /** * A round up formatter @@ -93,8 +97,18 @@ JavaDateFormatter getRoundupParser() { } // named formatters use default roundUpParser + JavaDateFormatter( + String format, + String printFormat, + DateTimeFormatter printer, + Boolean canCacheLastParsedFormatter, + DateTimeFormatter... parsers + ) { + this(format, printFormat, printer, ROUND_UP_BASE_FIELDS, canCacheLastParsedFormatter, parsers); + } + JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter... parsers) { - this(format, printer, ROUND_UP_BASE_FIELDS, parsers); + this(format, format, printer, false, parsers); } private static final BiConsumer ROUND_UP_BASE_FIELDS = (builder, parser) -> { @@ -111,8 +125,10 @@ JavaDateFormatter getRoundupParser() { // subclasses override roundUpParser JavaDateFormatter( String format, + String printFormat, DateTimeFormatter printer, BiConsumer roundupParserConsumer, + Boolean canCacheLastParsedFormatter, DateTimeFormatter... parsers ) { if (printer == null) { @@ -128,6 +144,8 @@ JavaDateFormatter getRoundupParser() { } this.printer = printer; this.format = format; + this.printFormat = printFormat; + this.canCacheLastParsedFormatter = canCacheLastParsedFormatter; if (parsers.length == 0) { this.parsers = Collections.singletonList(printer); @@ -138,6 +156,15 @@ JavaDateFormatter getRoundupParser() { this.roundupParser = new RoundUpFormatter(format, roundUp); } + JavaDateFormatter( + String format, + DateTimeFormatter printer, + BiConsumer roundupParserConsumer, + DateTimeFormatter... parsers + ) { + this(format, format, printer, roundupParserConsumer, false, parsers); + } + /** * This is when the RoundUp Formatters are created. In further merges (with ||) it will only append them to a list. * || is not expected to be provided as format when a RoundUp formatter is created. 
It will be splitted before in @@ -164,36 +191,61 @@ private List createRoundUpParser( return null; } - public static DateFormatter combined(String input, List formatters) { + public static DateFormatter combined( + String input, + List formatters, + DateFormatter printFormatter, + Boolean canCacheLastParsedFormatter + ) { assert formatters.size() > 0; + assert printFormatter != null; List parsers = new ArrayList<>(formatters.size()); List roundUpParsers = new ArrayList<>(formatters.size()); - DateTimeFormatter printer = null; + assert printFormatter instanceof JavaDateFormatter; + JavaDateFormatter javaPrintFormatter = (JavaDateFormatter) printFormatter; + DateTimeFormatter printer = javaPrintFormatter.getPrinter(); for (DateFormatter formatter : formatters) { assert formatter instanceof JavaDateFormatter; JavaDateFormatter javaDateFormatter = (JavaDateFormatter) formatter; - if (printer == null) { - printer = javaDateFormatter.getPrinter(); - } parsers.addAll(javaDateFormatter.getParsers()); roundUpParsers.addAll(javaDateFormatter.getRoundupParser().getParsers()); } - return new JavaDateFormatter(input, printer, roundUpParsers, parsers); + return new JavaDateFormatter( + input, + javaPrintFormatter.format, + printer, + roundUpParsers, + parsers, + canCacheLastParsedFormatter & FeatureFlags.isEnabled(FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING) + ); // check if caching is enabled } private JavaDateFormatter( String format, + String printFormat, DateTimeFormatter printer, List roundUpParsers, - List parsers + List parsers, + Boolean canCacheLastParsedFormatter ) { this.format = format; + this.printFormat = printFormat; this.printer = printer; this.roundupParser = roundUpParsers != null ? new RoundUpFormatter(format, roundUpParsers) : null; this.parsers = parsers; + this.canCacheLastParsedFormatter = canCacheLastParsedFormatter; + } + + private JavaDateFormatter( + String format, + DateTimeFormatter printer, + List roundUpParsers, + List parsers + ) { + this(format, format, printer, roundUpParsers, parsers, false); } JavaDateFormatter getRoundupParser() { @@ -222,7 +274,7 @@ public TemporalAccessor parse(String input) { * it will continue iterating if the previous parser failed. The pattern must fully match, meaning whole input was used. * This also means that this method depends on DateTimeFormatter.ClassicFormat.parseObject * which does not throw exceptions when parsing failed. - * + *
<p>
          * The approach with collection of parsers was taken because java-time requires ordering on optional (composite) * patterns. Joda does not suffer from this. * https://bugs.openjdk.java.net/browse/JDK-8188771 @@ -233,13 +285,23 @@ public TemporalAccessor parse(String input) { */ private TemporalAccessor doParse(String input) { if (parsers.size() > 1) { + Object object = null; + if (canCacheLastParsedFormatter && lastParsedformatter != null) { + ParsePosition pos = new ParsePosition(0); + object = lastParsedformatter.toFormat().parseObject(input, pos); + if (parsingSucceeded(object, input, pos)) { + return (TemporalAccessor) object; + } + } for (DateTimeFormatter formatter : parsers) { ParsePosition pos = new ParsePosition(0); - Object object = formatter.toFormat().parseObject(input, pos); + object = formatter.toFormat().parseObject(input, pos); if (parsingSucceeded(object, input, pos)) { + lastParsedformatter = formatter; return (TemporalAccessor) object; } } + throw new DateTimeParseException("Failed to parse with all enclosed parsers", input, 0); } return this.parsers.get(0).parse(input); @@ -260,7 +322,7 @@ public DateFormatter withZone(ZoneId zoneId) { .stream() .map(p -> p.withZone(zoneId)) .collect(Collectors.toList()); - return new JavaDateFormatter(format, printer.withZone(zoneId), roundUpParsers, parsers); + return new JavaDateFormatter(format, printFormat, printer.withZone(zoneId), roundUpParsers, parsers, canCacheLastParsedFormatter); } @Override @@ -274,7 +336,7 @@ public DateFormatter withLocale(Locale locale) { .stream() .map(p -> p.withLocale(locale)) .collect(Collectors.toList()); - return new JavaDateFormatter(format, printer.withLocale(locale), roundUpParsers, parsers); + return new JavaDateFormatter(format, printFormat, printer.withLocale(locale), roundUpParsers, parsers, canCacheLastParsedFormatter); } @Override @@ -287,6 +349,11 @@ public String pattern() { return format; } + @Override + public String printPattern() { + return printFormat; + } + @Override public Locale locale() { return this.printer.getLocale(); diff --git a/server/src/main/java/org/opensearch/common/time/JavaDateMathParser.java b/server/src/main/java/org/opensearch/common/time/JavaDateMathParser.java index 0536324b6516b..340901e7ac8e2 100644 --- a/server/src/main/java/org/opensearch/common/time/JavaDateMathParser.java +++ b/server/src/main/java/org/opensearch/common/time/JavaDateMathParser.java @@ -51,7 +51,7 @@ /** * A parser for date/time formatted text with optional date math. - * + *
<p>
          * The format of the datetime is configurable, and unix timestamps can also be used. Datemath * is appended to a datetime with the following syntax: * ||[+-/](\d+)?[yMwdhHms]. diff --git a/server/src/main/java/org/opensearch/common/util/BinarySearcher.java b/server/src/main/java/org/opensearch/common/util/BinarySearcher.java index ca63c170c0ccd..e4315f8699206 100644 --- a/server/src/main/java/org/opensearch/common/util/BinarySearcher.java +++ b/server/src/main/java/org/opensearch/common/util/BinarySearcher.java @@ -34,14 +34,14 @@ /** * Performs binary search on an arbitrary data structure. - * + *
<p>
          * To do a search, create a subclass and implement custom {@link #compare(int)} and {@link #distance(int)} methods. - * + *
<p>
          * {@link BinarySearcher} knows nothing about the value being searched for or the underlying data structure. * These things should be determined by the subclass in its overridden methods. - * + *
<p>
          * Refer to {@link BigArrays.DoubleBinarySearcher} for an example. - * + *
<p>
          * NOTE: this class is not thread safe * * @opensearch.internal @@ -74,7 +74,7 @@ private int getClosestIndex(int index1, int index2) { /** * Uses a binary search to determine the index of the element within the index range {from, ... , to} that is * closest to the search value. - * + *
<p>
          * Unlike most binary search implementations, the value being searched for is not an argument to search method. * Rather, this value should be stored by the subclass along with the underlying array. * diff --git a/server/src/main/java/org/opensearch/common/util/BytesRefHash.java b/server/src/main/java/org/opensearch/common/util/BytesRefHash.java index ecc93d017beaf..4afba2905019a 100644 --- a/server/src/main/java/org/opensearch/common/util/BytesRefHash.java +++ b/server/src/main/java/org/opensearch/common/util/BytesRefHash.java @@ -33,152 +33,292 @@ package org.opensearch.common.util; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.Numbers; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.hash.T1ha1; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.core.common.util.ByteArray; /** - * Specialized hash table implementation similar to Lucene's BytesRefHash that maps - * BytesRef values to ids. Collisions are resolved with open addressing and linear - * probing, growth is smooth thanks to {@link BigArrays}, hashes are cached for faster - * re-hashing and capacity is always a multiple of 2 for faster identification of buckets. - * This class is not thread-safe. + * Specialized hash table implementation that maps a {@link BytesRef} key to a long ordinal. * - * @opensearch.internal + *
<p>
          + * It uses a compact byte-packing strategy to encode the ordinal and fingerprint information + * in the hash table value. It makes lookups faster by short-circuiting expensive equality checks + * for keys that collide onto the same hash table slot. + * + *
<p>
          + * This class is not thread-safe. + * + * @opensearch.internal */ -public final class BytesRefHash extends AbstractHash { +@InternalApi +public final class BytesRefHash implements Releasable { + private static final long MAX_CAPACITY = 1L << 32; + private static final long DEFAULT_INITIAL_CAPACITY = 32; + private static final float DEFAULT_LOAD_FACTOR = 0.6f; + private static final Hasher DEFAULT_HASHER = key -> T1ha1.hash(key.bytes, key.offset, key.length); + + private static final long MASK_ORDINAL = 0x00000000FFFFFFFFL; // extract ordinal + private static final long MASK_FINGERPRINT = 0xFFFFFFFF00000000L; // extract fingerprint + + /** + * Maximum load factor after which the capacity is doubled. + */ + private final float loadFactor; + + /** + * Calculates the hash of a {@link BytesRef} key. + */ + private final Hasher hasher; + + /** + * Utility class to allocate recyclable arrays. + */ + private final BigArrays bigArrays; + + /** + * Reusable BytesRef to read keys. + */ + private final BytesRef scratch = new BytesRef(); + + /** + * Current capacity of the hash table. This must be a power of two so that the hash table slot + * can be identified quickly using bitmasks, thus avoiding expensive modulo or integer division. + */ + private long capacity; + + /** + * Bitmask to identify the hash table slot from a key's hash. + */ + private long mask; + + /** + * Size threshold after which the hash table needs to be doubled in capacity. + */ + private long grow; + + /** + * Current size of the hash table. + */ + private long size; + + /** + * Underlying array to store the hash table values. + * + *
<p>
          + * Each hash table value (64-bit) uses the following byte packing strategy: + *

+     * <pre>
+     * |================================|================================|
          +     * | Fingerprint                    | Ordinal                        |
          +     * |--------------------------------|--------------------------------|
          +     * | 32 bits                        | 32 bits                        |
+     * |================================|================================|
+     * </pre>
          + * + *
<p>
          + * This allows us to encode and manipulate additional information in the hash table + * itself without having to look elsewhere in the memory, which is much slower. + * + *
<p>
          + * Terminology: table[index] = value = (fingerprint | ordinal) + */ + private LongArray table; + + /** + * Underlying array to store the starting offsets of keys. + * + *
<p>
          + * Terminology: + *

+     * <pre>
+     *   offsets[ordinal] = starting offset (inclusive)
+     *   offsets[ordinal + 1] = ending offset (exclusive)
+     * </pre>
          + */ + private LongArray offsets; + + /** + * Underlying byte array to store the keys. + * + *
<p>
          + * Terminology: keys[start...end] = key + */ + private ByteArray keys; - private LongArray startOffsets; - private ByteArray bytes; - private IntArray hashes; // we cache hashes for faster re-hashing - private final BytesRef spare; + /** + * Pre-computed hashes of the stored keys. + * It is used to speed up reinserts when doubling the capacity. + */ + private LongArray hashes; - // Constructor with configurable capacity and default maximum load factor. - public BytesRefHash(long capacity, BigArrays bigArrays) { - this(capacity, DEFAULT_MAX_LOAD_FACTOR, bigArrays); + public BytesRefHash(final BigArrays bigArrays) { + this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_HASHER, bigArrays); } - // Constructor with configurable capacity and load factor. - public BytesRefHash(long capacity, float maxLoadFactor, BigArrays bigArrays) { - super(capacity, maxLoadFactor, bigArrays); - startOffsets = bigArrays.newLongArray(capacity + 1, false); - startOffsets.set(0, 0); - bytes = bigArrays.newByteArray(capacity * 3, false); - hashes = bigArrays.newIntArray(capacity, false); - spare = new BytesRef(); + public BytesRefHash(final long initialCapacity, final BigArrays bigArrays) { + this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_HASHER, bigArrays); } - // BytesRef has a weak hashCode function so we try to improve it by rehashing using Murmur3 - // Feel free to remove rehashing if BytesRef gets a better hash function - private static int rehash(int hash) { - return BitMixer.mix32(hash); + public BytesRefHash(final long initialCapacity, final float loadFactor, final BigArrays bigArrays) { + this(initialCapacity, loadFactor, DEFAULT_HASHER, bigArrays); } - /** - * Return the key at 0 <= index <= capacity(). The result is undefined if the slot is unused. - *

<p>Beware that the content of the {@link BytesRef} may become invalid as soon as {@link #close()} is called</p>

          - */ - public BytesRef get(long id, BytesRef dest) { - final long startOffset = startOffsets.get(id); - final int length = (int) (startOffsets.get(id + 1) - startOffset); - bytes.get(startOffset, length, dest); - return dest; + public BytesRefHash(final long initialCapacity, final float loadFactor, final Hasher hasher, final BigArrays bigArrays) { + assert initialCapacity > 0 : "initial capacity must be greater than 0"; + assert loadFactor > 0 && loadFactor < 1 : "load factor must be between 0 and 1"; + + this.loadFactor = loadFactor; + this.hasher = hasher; + this.bigArrays = bigArrays; + + capacity = Numbers.nextPowerOfTwo((long) (initialCapacity / loadFactor)); + assert capacity <= MAX_CAPACITY : "required capacity too large"; + mask = capacity - 1; + size = 0; + grow = (long) (capacity * loadFactor); + + table = bigArrays.newLongArray(capacity, false); + table.fill(0, capacity, -1); + offsets = bigArrays.newLongArray(initialCapacity + 1, false); + offsets.set(0, 0); + keys = bigArrays.newByteArray(initialCapacity * 3, false); + hashes = bigArrays.newLongArray(initialCapacity, false); } /** - * Get the id associated with key + * Adds the given key to the hash table and returns its ordinal. + * If the key exists already, it returns (-1 - ordinal). */ - public long find(BytesRef key, int code) { - final long slot = slot(rehash(code), mask); - for (long index = slot;; index = nextSlot(index, mask)) { - final long id = id(index); - if (id == -1L || key.bytesEquals(get(id, spare))) { - return id; + public long add(final BytesRef key) { + final long hash = hasher.hash(key); + final long fingerprint = hash & MASK_FINGERPRINT; + + for (long idx = hash & mask, value, ordinal;; idx = (idx + 1) & mask) { + if ((value = table.get(idx)) == -1) { + final long val = fingerprint | size; + if (size >= grow) { + growAndInsert(hash, val); + } else { + table.set(idx, val); + } + return append(key, hash); + } else if (((value & MASK_FINGERPRINT) == fingerprint) && key.bytesEquals(get(ordinal = (value & MASK_ORDINAL), scratch))) { + return -1 - ordinal; } } } - /** Sugar for {@link #find(BytesRef, int) find(key, key.hashCode()} */ - public long find(BytesRef key) { - return find(key, key.hashCode()); - } + /** + * Returns the ordinal associated with the given key, or -1 if the key doesn't exist. + * + *
<p>
          + * Using the 64-bit hash value, up to 32 least significant bits (LSB) are used to identify the + * home slot in the hash table, and an additional 32 bits are used to identify the fingerprint. + * The fingerprint further increases the entropy and reduces the number of false lookups in the + * keys' table during equality checks, which is expensive. + * + *
<p>
          + * Total entropy bits = 32 + log2(capacity) + * + *
<p>
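To make the bit layout concrete, here is a small standalone Java sketch of the packing described above; the mask constants, names, and values are illustrative assumptions, not the class's actual fields.

public final class FingerprintPackingDemo {
    private static final long MASK_ORDINAL = 0x00000000FFFFFFFFL;     // low 32 bits: ordinal
    private static final long MASK_FINGERPRINT = 0xFFFFFFFF00000000L; // high 32 bits: fingerprint

    public static void main(String[] args) {
        final long capacity = 1L << 8;          // power of two, as the table requires
        final long mask = capacity - 1;
        final long hash = 0x9E3779B97F4A7C15L;  // a 64-bit key hash
        final long ordinal = 42;                // append position of the key

        final long homeSlot = hash & mask;                // low bits pick the home slot
        final long fingerprint = hash & MASK_FINGERPRINT; // high bits disambiguate entries
        final long packed = fingerprint | ordinal;        // one long stores both

        assert (packed & MASK_ORDINAL) == ordinal;
        assert (packed & MASK_FINGERPRINT) == fingerprint;
        System.out.println("slot=" + homeSlot + " entry=0x" + Long.toHexString(packed));
    }
}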
          + * Linear probing starts from the home slot, until a match or an empty slot is found. + * Values are first checked using their fingerprint (to reduce false positives), then verified + * in the keys' table using an equality check. + */ + public long find(final BytesRef key) { + final long hash = hasher.hash(key); + final long fingerprint = hash & MASK_FINGERPRINT; - private long set(BytesRef key, int code, long id) { - assert rehash(key.hashCode()) == code; - assert size < maxSize; - final long slot = slot(code, mask); - for (long index = slot;; index = nextSlot(index, mask)) { - final long curId = id(index); - if (curId == -1) { // means unset - id(index, id); - append(id, key, code); - ++size; - return id; - } else if (key.bytesEquals(get(curId, spare))) { - return -1 - curId; + for (long idx = hash & mask, value, ordinal;; idx = (idx + 1) & mask) { + if ((value = table.get(idx)) == -1) { + return -1; + } else if (((value & MASK_FINGERPRINT) == fingerprint) && key.bytesEquals(get(ordinal = (value & MASK_ORDINAL), scratch))) { + return ordinal; } } } - private void append(long id, BytesRef key, int code) { - assert size == id; - final long startOffset = startOffsets.get(size); - bytes = bigArrays.grow(bytes, startOffset + key.length); - bytes.set(startOffset, key.bytes, key.offset, key.length); - startOffsets = bigArrays.grow(startOffsets, size + 2); - startOffsets.set(size + 1, startOffset + key.length); - hashes = bigArrays.grow(hashes, id + 1); - hashes.set(id, code); + /** + * Returns the key associated with the given ordinal. + * The result is undefined for an unused ordinal. + * + *
<p>
          + * Beware that the content of the {@link BytesRef} may become invalid as soon as {@link #close()} is called + */ + public BytesRef get(final long ordinal, final BytesRef dest) { + final long start = offsets.get(ordinal); + final int length = (int) (offsets.get(ordinal + 1) - start); + keys.get(start, length, dest); + return dest; } - private boolean assertConsistent(long id, int code) { - get(id, spare); - return rehash(spare.hashCode()) == code; + /** + * Returns the number of mappings in this hash table. + */ + public long size() { + return size; } - private void reset(int code, long id) { - assert assertConsistent(id, code); - final long slot = slot(code, mask); - for (long index = slot;; index = nextSlot(index, mask)) { - final long curId = id(index); - if (curId == -1) { // means unset - id(index, id); - break; - } - } + /** + * Appends the key in the keys' and offsets' tables. + */ + private long append(final BytesRef key, final long hash) { + final long start = offsets.get(size); + final long end = start + key.length; + offsets = bigArrays.grow(offsets, size + 2); + offsets.set(size + 1, end); + keys = bigArrays.grow(keys, end); + keys.set(start, key.bytes, key.offset, key.length); + hashes = bigArrays.grow(hashes, size + 1); + hashes.set(size, hash); + return size++; } /** - * Try to add key. Return its newly allocated id if it wasn't in the hash table yet, or -1-id - * if it was already present in the hash table. + * Grows the hash table by doubling its capacity, inserting the provided value, + * and reinserting the previous values at their updated slots. */ - public long add(BytesRef key, int code) { - if (size >= maxSize) { - assert size == maxSize; - grow(); - } - assert size < maxSize; - return set(key, rehash(code), size); - } + private void growAndInsert(final long hash, final long value) { + // Ensure that the hash table doesn't grow too large. + // This implicitly also ensures that the ordinals are no larger than 2^32, thus, + // preventing them from polluting the fingerprint bits in the hash table values. + assert capacity < MAX_CAPACITY : "hash table already at the max capacity"; + + capacity <<= 1; + mask = capacity - 1; + grow = (long) (capacity * loadFactor); + table = bigArrays.grow(table, capacity); + table.fill(0, capacity, -1); + table.set(hash & mask, value); - /** Sugar to {@link #add(BytesRef, int) add(key, key.hashCode()}. */ - public long add(BytesRef key) { - return add(key, key.hashCode()); + for (long ordinal = 0; ordinal < size; ordinal++) { + reinsert(ordinal, hashes.get(ordinal)); + } } - @Override - protected void removeAndAdd(long index) { - final long id = id(index, -1); - assert id >= 0; - final int code = hashes.get(id); - reset(code, id); + /** + * Reinserts the hash table value for an existing key stored at the given ordinal. + */ + private void reinsert(final long ordinal, final long hash) { + for (long idx = hash & mask;; idx = (idx + 1) & mask) { + if (table.get(idx) == -1) { + table.set(idx, (hash & MASK_FINGERPRINT) | ordinal); + return; + } + } } @Override public void close() { - try (Releasable releasable = Releasables.wrap(bytes, hashes, startOffsets)) { - super.close(); - } + Releasables.close(table, offsets, keys, hashes); } + /** + * Hasher calculates the hash of a {@link BytesRef} key. 
+ */ + @FunctionalInterface + public interface Hasher { + long hash(BytesRef key); + } } diff --git a/server/src/main/java/org/opensearch/common/util/CancellableThreads.java b/server/src/main/java/org/opensearch/common/util/CancellableThreads.java index 8bc3ca3affb12..67dd4b848f4c0 100644 --- a/server/src/main/java/org/opensearch/common/util/CancellableThreads.java +++ b/server/src/main/java/org/opensearch/common/util/CancellableThreads.java @@ -45,7 +45,7 @@ * A utility class for multi threaded operation that needs to be cancellable via interrupts. Every cancellable operation should be * executed via {@link #execute(Interruptible)}, which will capture the executing thread and make sure it is interrupted in the case * of cancellation. - * + *
<p>
          * Cancellation policy: This class does not support external interruption via Thread#interrupt(). Always use #cancel() instead. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/common/util/CuckooFilter.java b/server/src/main/java/org/opensearch/common/util/CuckooFilter.java index 0c792b37ccfa9..28b55f70855d6 100644 --- a/server/src/main/java/org/opensearch/common/util/CuckooFilter.java +++ b/server/src/main/java/org/opensearch/common/util/CuckooFilter.java @@ -46,33 +46,33 @@ /** * An approximate set membership datastructure - * + *
<p>
          * CuckooFilters are similar to Bloom Filters in usage; values are inserted, and the Cuckoo * can be asked if it has seen a particular value before. Because the structure is approximate, * it can return false positives (says it has seen an item when it has not). False negatives * are not possible though; if the structure says it _has not_ seen an item, that can be * trusted. - * + *
<p>
          * The filter can "saturate" at which point the map has hit it's configured load factor (or near enough * that a large number of evictions are not able to find a free slot) and will refuse to accept * any new insertions. - * + *
<p>
* NOTE: this version does not support deletions, and as such does not save duplicate * fingerprints (e.g. when inserting, if the fingerprint is already present in the * candidate buckets, it is not inserted). By not saving duplicates, the CuckooFilter * loses the ability to delete values. But by not allowing deletions, we can save space * (do not need to waste slots on duplicate fingerprints), and we do not need to worry * about inserts "overflowing" a bucket because the same item has been inserted repeatedly - * + *
<p>
          * NOTE: this CuckooFilter exposes a number of Expert APIs which assume the caller has * intimate knowledge about how the algorithm works. It is recommended to use * {@link SetBackedScalingCuckooFilter} instead. - * + *
<p>
          * Based on the paper: - * + *
<p>
          * Fan, Bin, et al. "Cuckoo filter: Practically better than bloom." * Proceedings of the 10th ACM International on Conference on emerging Networking Experiments and Technologies. ACM, 2014. - * + *
<p>
          * https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf * * @opensearch.internal @@ -200,7 +200,7 @@ public int getCount() { /** * Returns the number of buckets that has been chosen based * on the initial configuration - * + *
<p>
          * Expert-level API */ int getNumBuckets() { @@ -209,7 +209,7 @@ int getNumBuckets() { /** * Returns the number of bits used per entry - * + *
<p>
          * Expert-level API */ int getBitsPerEntry() { @@ -220,7 +220,7 @@ int getBitsPerEntry() { * Returns the cached fingerprint mask. This is simply a mask for the * first bitsPerEntry bits, used by {@link CuckooFilter#fingerprint(int, int, int)} * to generate the fingerprint of a hash - * + *
<p>
          * Expert-level API */ int getFingerprintMask() { @@ -230,7 +230,7 @@ int getFingerprintMask() { /** * Returns an iterator that returns the long[] representation of each bucket. The value * inside each long will be a fingerprint (or 0L, representing empty). - * + *
<p>
* Expert-level API */ Iterator getBuckets() { @@ -267,7 +267,7 @@ boolean mightContain(long hash) { /** * Returns true if the bucket or its alternate bucket contains the fingerprint. - * + *
<p>
* Expert-level API, use {@link CuckooFilter#mightContain(long)} to check if * a value is in the filter. */ @@ -307,7 +307,7 @@ boolean add(long hash) { /** * Attempts to merge the fingerprint into the specified bucket or its alternate bucket. * Returns true if the insertion was successful, false if the filter is saturated. - * + *
<p>
* Expert-level API, use {@link CuckooFilter#add(long)} to insert * values into the filter */ @@ -351,7 +351,7 @@ boolean mergeFingerprint(int bucket, int fingerprint) { * Low-level insert method. Attempts to write the fingerprint into an empty entry * at this bucket's position. Returns true if that was successful, false if all entries * were occupied. - * + *
<p>
* If the fingerprint already exists in one of the entries, it will not duplicate the * fingerprint as the original paper does. This means the filter _cannot_ support deletes, * but is not sensitive to "overflowing" buckets with repeated inserts @@ -376,10 +376,10 @@ private boolean tryInsert(int bucket, int fingerprint) { /** * Converts a hash into a bucket index (primary or alternate). - * + *
<p>
* If the hash is negative, this flips the bits. The hash is then taken modulo numBuckets * to get the final index. - * + *
<p>
          * Expert-level API */ static int hashToIndex(int hash, int numBuckets) { @@ -388,16 +388,16 @@ static int hashToIndex(int hash, int numBuckets) { /** * Calculates the alternate bucket for a given bucket:fingerprint tuple - * + *
<p>
          * The alternate bucket is the fingerprint multiplied by a mixing constant, * then xor'd against the bucket. This new value is modulo'd against * the buckets via {@link CuckooFilter#hashToIndex(int, int)} to get the final * index. - * + *
<p>
* Note that the xor makes this operation reversible as long as we have the * fingerprint and current bucket (regardless of whether that bucket was the primary * or alternate). - * + *
<p>
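As a hedged, self-contained check of that reversibility (the mixing constant and the power-of-two masking below are illustrative assumptions, not necessarily the values used in this class):

public final class AlternateIndexDemo {
    static int hashToIndex(int hash, int numBuckets) {
        return hash & (numBuckets - 1); // power-of-two modulo; also drops the sign bit
    }

    static int alternateIndex(int bucket, int fingerprint, int numBuckets) {
        return hashToIndex(bucket ^ (fingerprint * 0x5bd1e995), numBuckets);
    }

    public static void main(String[] args) {
        final int numBuckets = 1 << 10; // must be a power of two
        final int bucket = 123;
        final int fingerprint = 0x2F;
        final int alt = alternateIndex(bucket, fingerprint, numBuckets);
        // Applying the same operation from the alternate bucket lands back home:
        System.out.println(alternateIndex(alt, fingerprint, numBuckets) == bucket); // true
    }
}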
          * Expert-level API */ static int alternateIndex(int bucket, int fingerprint, int numBuckets) { @@ -424,10 +424,10 @@ private int getOffset(int bucket, int position) { /** * Calculates the fingerprint for a given hash. - * + *
<p>
* The fingerprint is simply the first `bitsPerEntry` bits that are non-zero. * If the entire hash is zero, `(int) 1` is used - * + *
<p>
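One plausible reading of that rule, as an illustrative sketch rather than the actual implementation: scan the hash in bitsPerEntry-sized windows and keep the first non-zero window, falling back to 1 so a fingerprint can never collide with the empty-slot value of 0.

public final class FingerprintDemo {
    static int fingerprint(int hash, int bitsPerEntry, int fingerprintMask) {
        if (hash == 0) {
            return 1; // an all-zero hash would otherwise look like an empty slot
        }
        for (int i = 0; i + bitsPerEntry <= Integer.SIZE; i += bitsPerEntry) {
            final int window = (hash >> i) & fingerprintMask;
            if (window != 0) {
                return window;
            }
        }
        return 1; // every inspected window was zero
    }

    public static void main(String[] args) {
        final int bitsPerEntry = 8;
        final int mask = (1 << bitsPerEntry) - 1;
        System.out.println(fingerprint(0xAB00, bitsPerEntry, mask)); // 171: first non-zero window
        System.out.println(fingerprint(0, bitsPerEntry, mask));      // 1: zero-hash fallback
    }
}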
          * Expert-level API */ static int fingerprint(int hash, int bitsPerEntry, int fingerprintMask) { @@ -501,7 +501,7 @@ private double getLoadFactor(int b) { * Calculates the optimal number of buckets for this filter. The xor used in the bucketing * algorithm requires this to be a power of two, so the optimal number of buckets will * be rounded to the next largest power of two where applicable. - * + *
<p>
          * TODO: there are schemes to avoid powers of two, might want to investigate those */ private int getNumBuckets(long capacity, double loadFactor, int b) { diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index e2663b56c5cca..4e9b417e3433b 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -20,19 +20,12 @@ * @opensearch.internal */ public class FeatureFlags { - /** * Gates the visibility of the segment replication experimental features that allows users to test unreleased beta features. */ public static final String SEGMENT_REPLICATION_EXPERIMENTAL = "opensearch.experimental.feature.segment_replication_experimental.enabled"; - /** - * Gates the visibility of the index setting that allows persisting data to remote store along with local disk. - * Once the feature is ready for production release, this feature flag can be removed. - */ - public static final String REMOTE_STORE = "opensearch.experimental.feature.remote_store.enabled"; - /** * Gates the ability for Searchable Snapshots to read snapshots that are older than the * guaranteed backward compatibility for OpenSearch (one prior major version) on a best effort basis. @@ -62,6 +55,11 @@ public class FeatureFlags { */ public static final String TELEMETRY = "opensearch.experimental.feature.telemetry.enabled"; + /** + * Gates the optimization of datetime formatters caching along with change in default datetime formatter. + */ + public static final String DATETIME_FORMATTER_CACHING = "opensearch.experimental.optimization.datetime_formatter_caching.enabled"; + /** * Should store the settings from opensearch.yml. 
*/ @@ -90,14 +88,23 @@ public static boolean isEnabled(String featureFlagName) { return settings != null && settings.getAsBoolean(featureFlagName, false); } + public static boolean isEnabled(Setting featureFlag) { + if ("true".equalsIgnoreCase(System.getProperty(featureFlag.getKey()))) { + // TODO: Remove the if condition once FeatureFlags are only supported via opensearch.yml + return true; + } else if (settings != null) { + return featureFlag.get(settings); + } else { + return featureFlag.getDefault(Settings.EMPTY); + } + } + public static final Setting SEGMENT_REPLICATION_EXPERIMENTAL_SETTING = Setting.boolSetting( SEGMENT_REPLICATION_EXPERIMENTAL, false, Property.NodeScope ); - public static final Setting REMOTE_STORE_SETTING = Setting.boolSetting(REMOTE_STORE, false, Property.NodeScope); - public static final Setting EXTENSIONS_SETTING = Setting.boolSetting(EXTENSIONS, false, Property.NodeScope); public static final Setting IDENTITY_SETTING = Setting.boolSetting(IDENTITY, false, Property.NodeScope); @@ -109,4 +116,10 @@ public static boolean isEnabled(String featureFlagName) { false, Property.NodeScope ); + + public static final Setting DATETIME_FORMATTER_CACHING_SETTING = Setting.boolSetting( + DATETIME_FORMATTER_CACHING, + true, + Property.NodeScope + ); } diff --git a/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java b/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java index 417eb6a316d86..86e7227cb6c85 100644 --- a/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java +++ b/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java @@ -8,7 +8,10 @@ package org.opensearch.common.util; +import org.opensearch.common.Numbers; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; /** * Specialized hash table implementation that maps a (primitive) long to long. @@ -24,6 +27,7 @@ * * @opensearch.internal */ +@InternalApi public class ReorganizingLongHash implements Releasable { private static final long MAX_CAPACITY = 1L << 32; private static final long DEFAULT_INITIAL_CAPACITY = 32; @@ -109,7 +113,8 @@ public ReorganizingLongHash(final long initialCapacity, final float loadFactor, this.bigArrays = bigArrays; this.loadFactor = loadFactor; - capacity = nextPowerOfTwo((long) (initialCapacity / loadFactor)); + capacity = Numbers.nextPowerOfTwo((long) (initialCapacity / loadFactor)); + assert capacity <= MAX_CAPACITY : "required capacity too large"; mask = capacity - 1; grow = (long) (capacity * loadFactor); size = 0; @@ -296,11 +301,6 @@ private void grow() { @Override public void close() { - table.close(); - keys.close(); - } - - private static long nextPowerOfTwo(final long value) { - return Math.max(1, Long.highestOneBit(value - 1) << 1); + Releasables.close(table, keys); } } diff --git a/server/src/main/java/org/opensearch/common/util/SetBackedScalingCuckooFilter.java b/server/src/main/java/org/opensearch/common/util/SetBackedScalingCuckooFilter.java index e99eb751babe8..a635160844159 100644 --- a/server/src/main/java/org/opensearch/common/util/SetBackedScalingCuckooFilter.java +++ b/server/src/main/java/org/opensearch/common/util/SetBackedScalingCuckooFilter.java @@ -75,7 +75,7 @@ public class SetBackedScalingCuckooFilter implements Writeable { * This set is used to track the insertions before we convert over to an approximate * filter. This gives us 100% accuracy for small cardinalities. 
This will be null * if isSetMode = false; - * + *
<p>
          * package-private for testing */ Set hashes; @@ -178,7 +178,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * Registers a circuit breaker with the datastructure. - * + *
<p>
* CuckooFilters can "saturate" and refuse to accept any new values. When this happens, * the datastructure scales by adding a new filter. This new filter's bytes will be tracked * in the registered breaker when configured. diff --git a/server/src/main/java/org/opensearch/common/util/TokenBucket.java b/server/src/main/java/org/opensearch/common/util/TokenBucket.java index d2e7e836bf07f..a9ebb86eed8a2 100644 --- a/server/src/main/java/org/opensearch/common/util/TokenBucket.java +++ b/server/src/main/java/org/opensearch/common/util/TokenBucket.java @@ -20,7 +20,7 @@ public class TokenBucket { /** * Defines a monotonically increasing counter. - * + *
<p>
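A minimal token bucket driven by such a pluggable clock, as an illustrative sketch only (this is not the actual TokenBucket API):

import java.util.function.LongSupplier;

final class MiniTokenBucket {
    private final LongSupplier clock;   // e.g. System::nanoTime, or an AtomicLong's ::get
    private final double tokensPerUnit; // refill rate per clock unit
    private final double burst;         // maximum bucket size
    private double tokens;
    private long lastRefill;

    MiniTokenBucket(LongSupplier clock, double tokensPerUnit, double burst) {
        this.clock = clock;
        this.tokensPerUnit = tokensPerUnit;
        this.burst = burst;
        this.tokens = burst;
        this.lastRefill = clock.getAsLong();
    }

    synchronized boolean request() {
        final long now = clock.getAsLong();
        tokens = Math.min(burst, tokens + (now - lastRefill) * tokensPerUnit);
        lastRefill = now;
        if (tokens >= 1) {
            tokens -= 1; // consume one token
            return true;
        }
        return false; // rate limit exceeded for now
    }
}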
          * Usage examples: * 1. clock = System::nanoTime can be used to perform rate-limiting per unit time * 2. clock = AtomicLong::get can be used to perform rate-limiting per unit number of operations diff --git a/server/src/main/java/org/opensearch/common/util/URIPattern.java b/server/src/main/java/org/opensearch/common/util/URIPattern.java index a3c385e5ea660..49e4b53e20740 100644 --- a/server/src/main/java/org/opensearch/common/util/URIPattern.java +++ b/server/src/main/java/org/opensearch/common/util/URIPattern.java @@ -39,9 +39,9 @@ /** * URI Pattern matcher - * + *
<p>
* The pattern is a URI in which the authority, path, query, and fragment can be replaced with simple patterns. - * + *
<p>
* For example: foobar://*.local/some_path/*?*#* will match all URIs with scheme foobar in the local domain * with any port, with a path that starts with some_path and with any query and fragment. * diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessor.java b/server/src/main/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessor.java index 7079aa705d126..be2029b2e7c62 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessor.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/BufferedAsyncIOProcessor.java @@ -92,4 +92,8 @@ private TimeValue getBufferInterval() { protected abstract String getBufferProcessThreadPoolName(); + // Exclusively for testing, please do not use it elsewhere. + public Supplier getBufferIntervalSupplier() { + return bufferIntervalSupplier; + } } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java b/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java index b1f4714a90e8e..4357254176358 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java @@ -73,7 +73,7 @@ public void addListener(ActionListener listener, ExecutorService executor) { * notified of a response or exception in a runnable submitted to the ExecutorService provided. * If the future has completed, the listener will be notified immediately without forking to * a different thread. - * + *
<p>
          * It will apply the provided ThreadContext (if not null) when executing the listening. */ public void addListener(ActionListener listener, ExecutorService executor, ThreadContext threadContext) { diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchThreadPoolExecutor.java b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchThreadPoolExecutor.java index d967b7423ca80..afffec4790873 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchThreadPoolExecutor.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchThreadPoolExecutor.java @@ -205,4 +205,15 @@ protected Runnable wrapRunnable(Runnable command) { protected Runnable unwrap(Runnable runnable) { return contextHolder.unwrap(runnable); } + + /** + * Returns the cumulative wait time of the ThreadPool. If the ThreadPool does not support tracking the cumulative pool wait time + * then this should return -1 which will prevent the value from showing up in {@link org.opensearch.threadpool.ThreadPoolStats}. + * ThreadPools that do support this metric should override this method. For example, {@link QueueResizingOpenSearchThreadPoolExecutor} + * does so using the {@link TimedRunnable} to get the difference between Runnable creation and execution. + * + */ + public long getPoolWaitTimeNanos() { + return -1; + } } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java b/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java index d3d0f6080e7f6..0cf8dc61c9254 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java @@ -118,8 +118,9 @@ private void addPending(List runnables, List pending, boolean TieBreakingPrioritizedRunnable t = (TieBreakingPrioritizedRunnable) runnable; Runnable innerRunnable = t.runnable; if (innerRunnable != null) { - /** innerRunnable can be null if task is finished but not removed from executor yet, - * see {@link TieBreakingPrioritizedRunnable#run} and {@link TieBreakingPrioritizedRunnable#runAndClean} + /* + innerRunnable can be null if task is finished but not removed from executor yet, + see {@link TieBreakingPrioritizedRunnable#run} and {@link TieBreakingPrioritizedRunnable#runAndClean} */ pending.add(new Pending(super.unwrap(innerRunnable), t.priority(), t.insertionOrder, executing)); } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/QueueResizingOpenSearchThreadPoolExecutor.java b/server/src/main/java/org/opensearch/common/util/concurrent/QueueResizingOpenSearchThreadPoolExecutor.java index b4097f6a9bb51..68bae3842b800 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/QueueResizingOpenSearchThreadPoolExecutor.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/QueueResizingOpenSearchThreadPoolExecutor.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.common.ExponentiallyWeightedMovingAverage; +import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.unit.TimeValue; import java.util.Locale; @@ -67,6 +68,7 @@ public final class QueueResizingOpenSearchThreadPoolExecutor extends OpenSearchT private final int maxQueueSize; private final long 
targetedResponseTimeNanos; private final ExponentiallyWeightedMovingAverage executionEWMA; + private final CounterMetric poolWaitTime; private final AtomicLong totalTaskNanos = new AtomicLong(0); private final AtomicInteger taskCount = new AtomicInteger(0); @@ -98,6 +100,7 @@ public final class QueueResizingOpenSearchThreadPoolExecutor extends OpenSearchT this.maxQueueSize = maxQueueSize; this.targetedResponseTimeNanos = targetedResponseTime.getNanos(); this.executionEWMA = new ExponentiallyWeightedMovingAverage(EWMA_ALPHA, 0); + this.poolWaitTime = new CounterMetric(); logger.debug( "thread pool [{}] will adjust queue by [{}] when determining automatic queue size", getName(), @@ -189,6 +192,7 @@ protected void afterExecute(Runnable r, Throwable t) { // taskExecutionNanos may be -1 if the task threw an exception executionEWMA.addValue(taskExecutionNanos); } + poolWaitTime.inc(timedRunnable.getWaitTimeNanos()); if (taskCount.incrementAndGet() == this.tasksPerFrame) { final long endTimeNs = System.nanoTime(); @@ -289,4 +293,8 @@ protected void appendThreadPoolExecutorDetails(StringBuilder sb) { sb.append("adjustment amount = ").append(QUEUE_ADJUSTMENT_AMOUNT).append(", "); } + @Override + public long getPoolWaitTimeNanos() { + return poolWaitTime.count(); + } } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 4888d25e4a640..fc2e4217bae79 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -143,10 +143,10 @@ public void unregisterThreadContextStatePropagator(final ThreadContextStatePropa */ public StoredContext stashContext() { final ThreadContextStruct context = threadLocal.get(); - /** - * X-Opaque-ID should be preserved in a threadContext in order to propagate this across threads. - * This is needed so the DeprecationLogger in another thread can see the value of X-Opaque-ID provided by a user. - * Otherwise when context is stash, it should be empty. + /* + X-Opaque-ID should be preserved in a threadContext in order to propagate this across threads. + This is needed so the DeprecationLogger in another thread can see the value of X-Opaque-ID provided by a user. + Otherwise when context is stash, it should be empty. */ ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT.putPersistent(context.persistentHeaders); diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/TimedRunnable.java b/server/src/main/java/org/opensearch/common/util/concurrent/TimedRunnable.java index f3bc50a33453b..2eb6657898008 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/TimedRunnable.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/TimedRunnable.java @@ -107,6 +107,14 @@ long getTotalExecutionNanos() { return Math.max(finishTimeNanos - startTimeNanos, 1); } + long getWaitTimeNanos() { + if (startTimeNanos == -1) { + // There must have been an exception thrown, the total time is unknown (-1) + return -1; + } + return Math.max(startTimeNanos - creationTimeNanos, 1); + } + /** * If the task was failed or rejected, return true. * Otherwise, false. 
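The wait-time bookkeeping introduced across TimedRunnable and the executor above can be sketched in isolation: wrap a task, remember its creation time, and accumulate the gap to its start time, clamped to >= 1 ns to mirror the Math.max(..., 1) convention in the diff. Names here are hypothetical.

import java.util.concurrent.atomic.LongAdder;

final class WaitTimedTask implements Runnable {
    private final Runnable inner;
    private final LongAdder poolWaitNanos; // shared, cheap concurrent counter
    private final long creationTimeNanos = System.nanoTime();

    WaitTimedTask(Runnable inner, LongAdder poolWaitNanos) {
        this.inner = inner;
        this.poolWaitNanos = poolWaitNanos;
    }

    @Override
    public void run() {
        final long startTimeNanos = System.nanoTime();
        poolWaitNanos.add(Math.max(startTimeNanos - creationTimeNanos, 1));
        inner.run();
    }
}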
diff --git a/server/src/main/java/org/opensearch/common/xcontent/LoggingDeprecationHandler.java b/server/src/main/java/org/opensearch/common/xcontent/LoggingDeprecationHandler.java index 06d139fa93195..05fc968737394 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/LoggingDeprecationHandler.java +++ b/server/src/main/java/org/opensearch/common/xcontent/LoggingDeprecationHandler.java @@ -54,7 +54,7 @@ public class LoggingDeprecationHandler implements DeprecationHandler { public static final LoggingDeprecationHandler INSTANCE = new LoggingDeprecationHandler(); /** * The logger to which to send deprecation messages. - * + *
<p>
          * This uses ParseField's logger because that is the logger that * we have been using for many releases for deprecated fields. * Changing that will require some research to make super duper diff --git a/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java index 798a58551457f..17bb0a1de267b 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java @@ -494,7 +494,7 @@ public static BytesReference toXContent(ToXContent toXContent, XContentType xCon /** * Returns the contents of an object as an unparsed BytesReference - * + *
<p>
          * This is useful for things like mappings where we're copying bytes around but don't * actually need to parse their contents, and so avoids building large maps of maps * unnecessarily diff --git a/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java b/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java index adfa871cbfcbe..a87edbb949d39 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java +++ b/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java @@ -117,12 +117,11 @@ private static void extractRawValues(List values, List part, String[] pa /** * For the provided path, return its value in the xContent map. - * + *
<p>
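A toy illustration of this lookup contract; the real method also understands arrays, which the sketch below omits.

import java.util.Map;

final class ExtractValueDemo {
    @SuppressWarnings("unchecked")
    static Object extractValue(String path, Map<String, Object> map) {
        Object current = map;
        for (String element : path.split("\\.")) {
            if (!(current instanceof Map)) {
                return null; // path descends into a non-object
            }
            current = ((Map<String, Object>) current).get(element);
        }
        return current; // may itself be a map or list, per the note below
    }

    public static void main(String[] args) {
        Map<String, Object> doc = Map.of("foo", Map.of("bar", 42));
        System.out.println(extractValue("foo.bar", doc)); // 42
        System.out.println(extractValue("foo.baz", doc)); // null
    }
}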
          * Note that in contrast with {@link XContentMapValues#extractRawValues}, array and object values * can be returned. * * @param path the value's path in the map. - * * @return the value associated with the path in the map or 'null' if the path does not exist. */ public static Object extractValue(String path, Map map) { @@ -138,7 +137,7 @@ public static Object extractValue(Map map, String... pathElements) { /** * For the provided path, return its value in the xContent map. - * + *
<p>
          * Note that in contrast with {@link XContentMapValues#extractRawValues}, array and object values * can be returned. * @@ -197,13 +196,13 @@ private static Object extractValue(String[] pathElements, int index, Object curr * Only keep properties in {@code map} that match the {@code includes} but * not the {@code excludes}. An empty list of includes is interpreted as a * wildcard while an empty list of excludes does not match anything. - * + *
<p>
          * If a property matches both an include and an exclude, then the exclude * wins. - * + *
<p>
          * If an object matches, then any of its sub properties are automatically * considered as matching as well, both for includes and excludes. - * + *
<p>
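Assuming the filter(Map, String[], String[]) helper declared in this same class, the exclude-wins and sub-property rules play out as in this hedged usage sketch:

import java.util.List;
import java.util.Map;

final class FilterDemo {
    public static void main(String[] args) {
        Map<String, Object> doc = Map.of(
            "user", Map.of("name", "kim", "ssn", "secret"),
            "tags", List.of("a", "b")
        );
        // "user" matches the include, so its sub-properties are included too,
        // except "user.ssn", where the exclude wins; "tags" matches no include.
        Map<String, Object> filtered = org.opensearch.common.xcontent.support.XContentMapValues.filter(
            doc,
            new String[] { "user" },
            new String[] { "user.ssn" }
        );
        System.out.println(filtered); // {user={name=kim}}
    }
}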
* Dots in field names are treated as sub objects. So for instance if a * document contains {@code a.b} as a property and {@code a} is an include, * then {@code a.b} will be kept in the filtered map. @@ -555,7 +554,7 @@ public static Map nodeMapValue(Object node, String desc) { /** * Returns an array of string values from a node value. - * + *
<p>
          * If the node represents an array the corresponding array of strings is returned. * Otherwise the node is treated as a comma-separated string. */ diff --git a/server/src/main/java/org/opensearch/crypto/CryptoHandlerRegistry.java b/server/src/main/java/org/opensearch/crypto/CryptoHandlerRegistry.java new file mode 100644 index 0000000000000..0a14331be35f7 --- /dev/null +++ b/server/src/main/java/org/opensearch/crypto/CryptoHandlerRegistry.java @@ -0,0 +1,168 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.crypto; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.common.SetOnce; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.MasterKeyProvider; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.CryptoKeyProviderPlugin; +import org.opensearch.plugins.CryptoPlugin; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * During node bootstrap, installed key provider extensions responsible for generating data keys are loaded. + * Crypto factories against the respective KP plugins are cached. A crypto factory is used to register crypto + * handler against an {@link org.opensearch.common.blobstore.EncryptedBlobStore} + */ +public class CryptoHandlerRegistry { + private static final Logger logger = LogManager.getLogger(CryptoHandlerRegistry.class); + // Package private for tests + SetOnce> registry = new SetOnce<>(); + + // Package private for tests + SetOnce cryptoHandlerPlugin = new SetOnce<>(); + private final Map registeredCryptoHandlers = new HashMap<>(); + + private static volatile CryptoHandlerRegistry instance; + private static final Object lock = new Object(); + + /** + * Initializes the registry with crypto factories for the installed crypto key providers. + * + * @param cryptoPlugins The list of installed crypto key provider plugins. + * @param settings Crypto settings. + */ + protected CryptoHandlerRegistry( + List cryptoPlugins, + List cryptoKeyProviderPlugins, + Settings settings + ) { + if (cryptoPlugins == null || cryptoPlugins.size() == 0) { + return; + } + if (cryptoPlugins.size() > 1) { + // We can remove this to support multiple implementations in future if needed. 
+ throw new IllegalStateException("More than 1 implementation of crypto plugin found."); + } + + cryptoHandlerPlugin.set(cryptoPlugins.get(0)); + registry.set(loadCryptoFactories(cryptoKeyProviderPlugins)); + } + + public static CryptoHandlerRegistry getInstance() { + return instance; + } + + public static CryptoHandlerRegistry initRegistry( + List cryptoPlugins, + List cryptoKeyProviderPlugins, + Settings settings + ) { + CryptoHandlerRegistry curInstance = instance; + if (curInstance == null) { + synchronized (lock) { + curInstance = instance; + if (curInstance == null) { + instance = curInstance = new CryptoHandlerRegistry(cryptoPlugins, cryptoKeyProviderPlugins, settings); + } + } + } + return curInstance; + } + + // For tests + protected Map loadCryptoFactories(List cryptoKPPlugins) { + Map cryptoFactories = new HashMap<>(); + for (CryptoKeyProviderPlugin cryptoKPPlugin : cryptoKPPlugins) { + if (cryptoFactories.containsKey(cryptoKPPlugin.type())) { + throw new IllegalArgumentException("Crypto plugin key provider type [" + cryptoKPPlugin.type() + "] is already registered"); + } + cryptoFactories.put(cryptoKPPlugin.type(), cryptoKPPlugin); + } + + return Map.copyOf(cryptoFactories); + } + + /** + * Retrieves the crypto factory associated with the given key provider type . + * + * @param keyProviderType The unique provider type for which the factory is to be fetched. + * @return The crypto factory used to create {@link CryptoHandler} + * instances in a {@link org.opensearch.common.blobstore.EncryptedBlobStore}. + * @throws IllegalStateException If the crypto registry is not yet loaded. + */ + public CryptoKeyProviderPlugin getCryptoKeyProviderPlugin(String keyProviderType) { + if (registry.get() == null) { + throw new IllegalStateException("Crypto registry is not yet loaded"); + } + return Objects.requireNonNull(registry.get()).get(keyProviderType); + } + + /** + * Fetches the cached crypto manager for the provided crypto metadata or creates a new one if not found. + * If the key provider is not installed, it throws a {@link CryptoRegistryException}. + * + * @param cryptoMetadata The crypto metadata for which the key provider is to be created. + * @return The crypto manager for performing encrypt/decrypt operations. + * @throws CryptoRegistryException If the key provider is not installed or there is an error during crypto manager creation. 
+ */ + public CryptoHandler fetchCryptoHandler(CryptoMetadata cryptoMetadata) { + CryptoHandler cryptoHandler = registeredCryptoHandlers.get(cryptoMetadata); + if (cryptoHandler == null) { + synchronized (registeredCryptoHandlers) { + cryptoHandler = registeredCryptoHandlers.get(cryptoMetadata); + if (cryptoHandler == null) { + Runnable onClose = () -> { + synchronized (registeredCryptoHandlers) { + registeredCryptoHandlers.remove(cryptoMetadata); + } + }; + cryptoHandler = createCryptoHandler(cryptoMetadata, onClose); + registeredCryptoHandlers.put(cryptoMetadata, cryptoHandler); + } + } + } + return cryptoHandler; + } + + private CryptoHandler createCryptoHandler(CryptoMetadata cryptoMetadata, Runnable onClose) { + logger.debug("creating crypto client [{}][{}]", cryptoMetadata.keyProviderType(), cryptoMetadata.keyProviderName()); + CryptoKeyProviderPlugin keyProviderPlugin = getCryptoKeyProviderPlugin(cryptoMetadata.keyProviderType()); + if (keyProviderPlugin == null) { + throw new CryptoRegistryException(cryptoMetadata.keyProviderName(), cryptoMetadata.keyProviderType()); + } + + try { + MasterKeyProvider masterKeyProvider = keyProviderPlugin.createKeyProvider(cryptoMetadata); + return Objects.requireNonNull(cryptoHandlerPlugin.get()) + .getOrCreateCryptoHandler(masterKeyProvider, cryptoMetadata.keyProviderName(), cryptoMetadata.keyProviderType(), onClose); + + } catch (Exception e) { + logger.warn( + new ParameterizedMessage( + "failed to create crypto manager of name [{}] and type [{}]", + cryptoMetadata.keyProviderName(), + cryptoMetadata.keyProviderType() + ), + e + ); + throw new CryptoRegistryException(cryptoMetadata.keyProviderName(), cryptoMetadata.keyProviderType(), e); + } + } + +} diff --git a/server/src/main/java/org/opensearch/crypto/CryptoRegistryException.java b/server/src/main/java/org/opensearch/crypto/CryptoRegistryException.java new file mode 100644 index 0000000000000..a1b065649079d --- /dev/null +++ b/server/src/main/java/org/opensearch/crypto/CryptoRegistryException.java @@ -0,0 +1,127 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.crypto; + +import org.opensearch.OpenSearchException; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; + +import java.io.IOException; + +/** + * Thrown when crypto manager creation or retrieval fails. + * + * @opensearch.internal + */ +public class CryptoRegistryException extends OpenSearchException { + private final String name; + private final String type; + private final RestStatus restStatus; + + /** + * Constructs a new CryptoRegistryException with the given client name and client type. + * + * @param clientName The name of the client for which the crypto registry is missing. + * @param clientType The type of the client for which the crypto registry is missing. + */ + public CryptoRegistryException(String clientName, String clientType) { + super("[Missing crypto registry for client name : " + clientName + " of type " + clientType + " ]"); + this.name = clientName; + this.type = clientType; + this.restStatus = RestStatus.NOT_FOUND; + } + + /** + * Constructs a new CryptoRegistryException with the given client name, client type, and a cause. + * + * @param clientName The name of the client that caused the exception. 
+ * @param clientType The type of the client that caused the exception. + * @param cause The cause of the exception, which could be another throwable. + */ + public CryptoRegistryException(String clientName, String clientType, Throwable cause) { + super("[Client name : " + clientName + " Type " + clientType + " ]", cause); + this.name = clientName; + this.type = clientType; + if (cause instanceof IllegalArgumentException) { + this.restStatus = RestStatus.BAD_REQUEST; + } else { + this.restStatus = RestStatus.INTERNAL_SERVER_ERROR; + } + } + + /** + * Constructs a new CryptoRegistryException with the given client name, client type, and a custom message. + * + * @param clientName The name of the client that caused the exception. + * @param clientType The type of the client that caused the exception. + * @param msg A custom message to be included in the exception. + */ + public CryptoRegistryException(String clientName, String clientType, String msg) { + super("[ " + msg + " Client name : " + clientName + " type " + clientType + " ] "); + this.name = clientName; + this.type = clientType; + this.restStatus = RestStatus.INTERNAL_SERVER_ERROR; + } + + /** + * Get the HTTP status associated with this exception. + * + * @return The HTTP status code representing the nature of the exception. + */ + @Override + public RestStatus status() { + return restStatus; + } + + /** + * Get the name of the client associated with this exception. + * + * @return The name of the client for which the exception was raised. + */ + public String getName() { + return name; + } + + /** + * Get the type of the client associated with this exception. + * + * @return The type of the client for which the exception was raised. + */ + public String getType() { + return type; + } + + /** + * Constructs a new CryptoRegistryException by deserializing it from the provided input stream. + * + * @param in The input stream containing the serialized exception data. + * @throws IOException If an I/O error occurs while reading from the input stream. + */ + public CryptoRegistryException(StreamInput in) throws IOException { + super(in); + this.name = in.readString(); + this.type = in.readString(); + this.restStatus = RestStatus.fromCode(in.readInt()); + } + + /** + * Write the exception data to the provided output stream for serialization. + * + * @param out The output stream to which the exception data should be written. + * @throws IOException If an I/O error occurs while writing to the output stream. + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(name); + out.writeString(type); + out.writeInt(restStatus.getStatus()); + } +} diff --git a/server/src/main/java/org/opensearch/crypto/package-info.java b/server/src/main/java/org/opensearch/crypto/package-info.java new file mode 100644 index 0000000000000..742960ac1cf97 --- /dev/null +++ b/server/src/main/java/org/opensearch/crypto/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Package for crypto client abstractions and exceptions. 
+ */ +package org.opensearch.crypto; diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java index 68fce4d9b9bb4..288371aa240a0 100644 --- a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.Coordinator; import org.opensearch.cluster.coordination.ElectionStrategy; +import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.routing.allocation.AllocationService; @@ -52,6 +53,7 @@ import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.gateway.GatewayMetaState; import org.opensearch.monitor.NodeHealthService; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.plugins.DiscoveryPlugin; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -129,7 +131,9 @@ public DiscoveryModule( Path configFile, GatewayMetaState gatewayMetaState, RerouteService rerouteService, - NodeHealthService nodeHealthService + NodeHealthService nodeHealthService, + PersistedStateRegistry persistedStateRegistry, + RemoteStoreNodeService remoteStoreNodeService ) { final Collection> joinValidators = new ArrayList<>(); final Map> hostProviders = new HashMap<>(); @@ -205,7 +209,9 @@ public DiscoveryModule( new Random(Randomness.get().nextLong()), rerouteService, electionStrategy, - nodeHealthService + nodeHealthService, + persistedStateRegistry, + remoteStoreNodeService ); } else { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java b/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java index 665ecf77d7aa7..fb341ac2ac569 100644 --- a/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java +++ b/server/src/main/java/org/opensearch/discovery/DiscoveryStats.java @@ -32,8 +32,10 @@ package org.opensearch.discovery; +import org.opensearch.Version; import org.opensearch.cluster.coordination.PendingClusterStateStats; import org.opensearch.cluster.coordination.PublishClusterStateStats; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -51,21 +53,31 @@ public class DiscoveryStats implements Writeable, ToXContentFragment { private final PendingClusterStateStats queueStats; private final PublishClusterStateStats publishStats; + private final ClusterStateStats clusterStateStats; - public DiscoveryStats(PendingClusterStateStats queueStats, PublishClusterStateStats publishStats) { + public DiscoveryStats(PendingClusterStateStats queueStats, PublishClusterStateStats publishStats, ClusterStateStats clusterStateStats) { this.queueStats = queueStats; this.publishStats = publishStats; + this.clusterStateStats = clusterStateStats; } public DiscoveryStats(StreamInput in) throws IOException { queueStats = in.readOptionalWriteable(PendingClusterStateStats::new); publishStats = in.readOptionalWriteable(PublishClusterStateStats::new); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + clusterStateStats = 
in.readOptionalWriteable(ClusterStateStats::new); + } else { + clusterStateStats = null; + } } @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(queueStats); out.writeOptionalWriteable(publishStats); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalWriteable(clusterStateStats); + } } @Override @@ -77,6 +89,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (publishStats != null) { publishStats.toXContent(builder, params); } + if (clusterStateStats != null) { + clusterStateStats.toXContent(builder, params); + } builder.endObject(); return builder; } @@ -92,4 +107,8 @@ public PendingClusterStateStats getQueueStats() { public PublishClusterStateStats getPublishStats() { return publishStats; } + + public ClusterStateStats getClusterStateStats() { + return clusterStateStats; + } } diff --git a/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java b/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java index 3159733336057..b663227978e8f 100644 --- a/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java +++ b/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java @@ -48,12 +48,12 @@ /** * An implementation of {@link SeedHostsProvider} that reads hosts/ports * from {@link #UNICAST_HOSTS_FILE}. - * + *
<p>
          * Each host/port that is part of the discovery process must be listed on * a separate line. If the port is left off an entry, we default to the * first port in the {@code transport.port} range. * An example unicast hosts file could read: - * + *
<p>
          * 67.81.244.10 * 67.81.244.11:9305 * 67.81.244.15:9400 diff --git a/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java b/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java index 9785d5b21078e..10185322c2ca6 100644 --- a/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java +++ b/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java @@ -49,7 +49,7 @@ * An implementation of {@link SeedHostsProvider} that reads hosts/ports * from the "discovery.seed_hosts" node setting. If the port is * left off an entry, we default to the first port in the {@code transport.port} range. - * + *
<p>
          * An example setting might look as follows: * [67.81.244.10, 67.81.244.11:9305, 67.81.244.15:9400] * diff --git a/server/src/main/java/org/opensearch/env/Environment.java b/server/src/main/java/org/opensearch/env/Environment.java index 7ad0e3b011959..ade21ebdb8677 100644 --- a/server/src/main/java/org/opensearch/env/Environment.java +++ b/server/src/main/java/org/opensearch/env/Environment.java @@ -33,6 +33,7 @@ package org.opensearch.env; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; @@ -56,8 +57,9 @@ /** * The environment of where things exists. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") @SuppressForbidden(reason = "configures paths for the system") // TODO: move PathUtils to be package-private here instead of // public+forbidden api! @@ -247,7 +249,7 @@ public Path[] repoFiles() { /** * Resolves the specified location against the list of configured repository roots - * + *
<p>
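In spirit, the resolution can be sketched as follows; names are hypothetical and this is not the Environment implementation.

import java.nio.file.Path;

final class RepoResolveDemo {
    static Path resolveAgainstRoots(String location, Path[] repoRoots) {
        for (Path root : repoRoots) {
            final Path candidate = root.resolve(location).normalize();
            if (candidate.startsWith(root)) {
                return candidate; // stays inside this configured root
            }
        }
        return null; // the location escapes every configured root
    }
}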
          * If the specified location doesn't match any of the roots, returns null. */ public Path resolveRepoFile(String location) { @@ -257,7 +259,7 @@ public Path resolveRepoFile(String location) { /** * Checks if the specified URL is pointing to the local file system and if it does, resolves the specified url * against the list of configured repository roots - * + *
<p>
          * If the specified url doesn't match any of the roots, returns null. */ public URL resolveRepoURL(URL url) { diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index fb88d9092a092..58ab96be9f9af 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -53,6 +53,7 @@ import org.opensearch.common.Randomness; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Setting; @@ -108,14 +109,16 @@ /** * A component that holds all data paths for a single node. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class NodeEnvironment implements Closeable { /** * A node path. * - * @opensearch.internal + * @opensearch.api */ + @PublicApi(since = "1.0.0") public static class NodePath { /* ${data.paths}/nodes/{node.id} */ public final Path path; @@ -702,7 +705,7 @@ public List lockAllForIndex( * write operation on a shards data directory like deleting files, creating a new index writer * or recover from a different shard instance into it. If the shard lock can not be acquired * a {@link ShardLockObtainFailedException} is thrown. - * + *
<p>
* Note: this method will return immediately if the lock can't be acquired. * * @param id the shard ID to lock @@ -775,7 +778,7 @@ public interface ShardLocker { /** * Returns all currently locked shards. - * + *
<p>
          * Note: the shard ids return do not contain a valid Index UUID */ public Set lockedShards() { diff --git a/server/src/main/java/org/opensearch/env/ShardLock.java b/server/src/main/java/org/opensearch/env/ShardLock.java index dd34eb3275f68..76afc0ec0329a 100644 --- a/server/src/main/java/org/opensearch/env/ShardLock.java +++ b/server/src/main/java/org/opensearch/env/ShardLock.java @@ -32,6 +32,7 @@ package org.opensearch.env; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.index.shard.ShardId; import java.io.Closeable; @@ -44,8 +45,9 @@ * * @see NodeEnvironment * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class ShardLock implements Closeable { private final ShardId shardId; diff --git a/server/src/main/java/org/opensearch/env/ShardLockObtainFailedException.java b/server/src/main/java/org/opensearch/env/ShardLockObtainFailedException.java index 525d8a76c9699..ae77d942356b5 100644 --- a/server/src/main/java/org/opensearch/env/ShardLockObtainFailedException.java +++ b/server/src/main/java/org/opensearch/env/ShardLockObtainFailedException.java @@ -33,6 +33,7 @@ package org.opensearch.env; import org.opensearch.OpenSearchException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; @@ -41,8 +42,9 @@ /** * Exception used when the in-memory lock for a shard cannot be obtained * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ShardLockObtainFailedException extends OpenSearchException { public ShardLockObtainFailedException(ShardId shardId, String message) { diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionReader.java b/server/src/main/java/org/opensearch/extensions/ExtensionReader.java index 4556d6537ffdc..fc2ba817ace5f 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionReader.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionReader.java @@ -17,7 +17,7 @@ * a category class used to identify the reader defined within the JVM that the extension is running on. * Additionally, this method takes in the extension's corresponding DiscoveryNode and a byte array (context) that the * extension's reader will be applied to. - * + *
<p>
          * By convention the extensions' reader is a constructor that takes StreamInput as an argument for most classes and a static method for things like enums. * Classes will implement this via a constructor (or a static method in the case of enumerations), it's something that should * look like: diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java index 8073fb9cb7f56..3e71fb16c10ae 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java @@ -289,7 +289,7 @@ private void registerRequestHandler(DynamicActionRegistry dynamicActionRegistry) * Loads a single extension * @param extension The extension to be loaded */ - public void loadExtension(Extension extension) throws IOException { + public DiscoveryExtensionNode loadExtension(Extension extension) throws IOException { validateExtension(extension); DiscoveryExtensionNode discoveryExtensionNode = new DiscoveryExtensionNode( extension.getName(), @@ -303,6 +303,12 @@ public void loadExtension(Extension extension) throws IOException { extensionIdMap.put(extension.getUniqueId(), discoveryExtensionNode); extensionSettingsMap.put(extension.getUniqueId(), extension); logger.info("Loaded extension with uniqueId " + extension.getUniqueId() + ": " + extension); + return discoveryExtensionNode; + } + + public void initializeExtension(Extension extension) throws IOException { + DiscoveryExtensionNode node = loadExtension(extension); + initializeExtensionNode(node); } private void validateField(String fieldName, String value) throws IOException { @@ -329,11 +335,11 @@ private void validateExtension(Extension extension) throws IOException { */ public void initialize() { for (DiscoveryExtensionNode extension : extensionIdMap.values()) { - initializeExtension(extension); + initializeExtensionNode(extension); } } - private void initializeExtension(DiscoveryExtensionNode extension) { + public void initializeExtensionNode(DiscoveryExtensionNode extensionNode) { final CompletableFuture inProgressFuture = new CompletableFuture<>(); final TransportResponseHandler initializeExtensionResponseHandler = new TransportResponseHandler< @@ -373,7 +379,8 @@ public String executor() { transportService.getThreadPool().generic().execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { - extensionIdMap.remove(extension.getId()); + logger.warn("Error registering extension: " + extensionNode.getId(), e); + extensionIdMap.remove(extensionNode.getId()); if (e.getCause() instanceof ConnectTransportException) { logger.info("No response from extension to request.", e); throw (ConnectTransportException) e.getCause(); @@ -388,11 +395,11 @@ public void onFailure(Exception e) { @Override protected void doRun() throws Exception { - transportService.connectToExtensionNode(extension); + transportService.connectToExtensionNode(extensionNode); transportService.sendRequest( - extension, + extensionNode, REQUEST_EXTENSION_ACTION_NAME, - new InitializeExtensionRequest(transportService.getLocalNode(), extension), + new InitializeExtensionRequest(transportService.getLocalNode(), extensionNode), initializeExtensionResponseHandler ); } diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java b/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java index b6d628ae9253f..18adc37156ca1 100644 --- 
a/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java @@ -58,6 +58,9 @@ public TransportResponse handleRegisterRestActionsRequest( DynamicActionRegistry dynamicActionRegistry ) throws Exception { DiscoveryExtensionNode discoveryExtensionNode = extensionIdMap.get(restActionsRequest.getUniqueId()); + if (discoveryExtensionNode == null) { + throw new IllegalStateException("Missing extension node for " + restActionsRequest.getUniqueId()); + } RestHandler handler = new RestSendToExtensionAction( restActionsRequest, discoveryExtensionNode, diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java index 4b622b841a040..fc7c21a6eccd6 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java @@ -159,8 +159,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client extAdditionalSettings ); try { - extensionsManager.loadExtension(extension); - extensionsManager.initialize(); + extensionsManager.initializeExtension(extension); } catch (CompletionException e) { Throwable cause = e.getCause(); if (cause instanceof TimeoutException) { diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java index d33ddd3848fe4..cb84d36380c3c 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java @@ -151,7 +151,7 @@ public RestSendToExtensionAction( @Override public String getName() { - return SEND_TO_EXTENSION_ACTION; + return this.discoveryExtensionNode.getId() + ":" + SEND_TO_EXTENSION_ACTION; } @Override diff --git a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java index 59ef894958cbe..853fe03904c53 100644 --- a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java @@ -50,7 +50,7 @@ /** * An abstract class that implements basic functionality for allocating * shards to nodes based on shard copies that already exist in the cluster. - * + *
<p>
          * Individual implementations of this class are responsible for providing * the logic to determine to which nodes (if any) those shards are allocated. * diff --git a/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java b/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java index e7c1ba01e7920..48479691689e5 100644 --- a/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java +++ b/server/src/main/java/org/opensearch/gateway/DanglingIndicesState.java @@ -244,7 +244,7 @@ List filterDanglingIndices(Metadata metadata, Map * Dangling importing indices with aliases is dangerous, it could for instance result in inability to write to an existing alias if it * previously had only one index with any is_write_index indication. */ diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index ed763e758ae41..24183f2d2675f 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -46,6 +46,9 @@ import org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.cluster.coordination.CoordinationState.PersistedState; import org.opensearch.cluster.coordination.InMemoryPersistedState; +import org.opensearch.cluster.coordination.PersistedStateRegistry; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; +import org.opensearch.cluster.coordination.PersistedStateStats; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexTemplateMetadata; import org.opensearch.cluster.metadata.Manifest; @@ -53,7 +56,6 @@ import org.opensearch.cluster.metadata.MetadataIndexUpgradeService; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.SetOnce; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AbstractRunnable; @@ -61,6 +63,10 @@ import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor; import org.opensearch.common.util.io.IOUtils; import org.opensearch.env.NodeMetadata; +import org.opensearch.gateway.remote.ClusterMetadataManifest; +import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.index.recovery.RemoteStoreRestoreService; +import org.opensearch.index.recovery.RemoteStoreRestoreService.RemoteRestoreResult; import org.opensearch.node.Node; import org.opensearch.plugins.MetadataUpgrader; import org.opensearch.threadpool.ThreadPool; @@ -73,6 +79,7 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; @@ -81,37 +88,37 @@ import java.util.function.UnaryOperator; import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; /** * Loads (and maybe upgrades) cluster metadata at startup, and persistently stores cluster metadata for future restarts. - * - * When started, ensures that this version is compatible with the state stored on disk, and performs a state upgrade if necessary. 
Note that - * the state being loaded when constructing the instance of this class is not necessarily the state that will be used as {@link - * ClusterState#metadata()} because it might be stale or incomplete. Cluster-manager-eligible nodes must perform an election to find a complete and - * non-stale state, and cluster-manager-ineligible nodes receive the real cluster state from the elected cluster-manager after joining the cluster. + *
<p>
          + * When started, ensures that this version is compatible with the state stored on disk, and performs a state upgrade if necessary. Note that the state being + * loaded when constructing the instance of this class is not necessarily the state that will be used as {@link ClusterState#metadata()} because it might be + * stale or incomplete. Cluster-manager-eligible nodes must perform an election to find a complete and non-stale state, and cluster-manager-ineligible nodes + * receive the real cluster state from the elected cluster-manager after joining the cluster. * * @opensearch.internal */ public class GatewayMetaState implements Closeable { /** - * Fake node ID for a voting configuration written by a cluster-manager-ineligible data node to indicate that its on-disk state is potentially - * stale (since it is written asynchronously after application, rather than before acceptance). This node ID means that if the node is - * restarted as a cluster-manager-eligible node then it does not win any elections until it has received a fresh cluster state. + * Fake node ID for a voting configuration written by a cluster-manager-ineligible data node to indicate that its on-disk state is potentially stale (since + * it is written asynchronously after application, rather than before acceptance). This node ID means that if the node is restarted as a + * cluster-manager-eligible node then it does not win any elections until it has received a fresh cluster state. */ public static final String STALE_STATE_CONFIG_NODE_ID = "STALE_STATE_CONFIG"; - // Set by calling start() - private final SetOnce persistedState = new SetOnce<>(); + private PersistedStateRegistry persistedStateRegistry; public PersistedState getPersistedState() { - final PersistedState persistedState = this.persistedState.get(); + final PersistedState persistedState = persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL); assert persistedState != null : "not started"; return persistedState; } public Metadata getMetadata() { - return getPersistedState().getLastAcceptedState().metadata(); + return persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL).getLastAcceptedState().metadata(); } public void start( @@ -121,9 +128,13 @@ public void start( MetaStateService metaStateService, MetadataIndexUpgradeService metadataIndexUpgradeService, MetadataUpgrader metadataUpgrader, - PersistedClusterStateService persistedClusterStateService + PersistedClusterStateService persistedClusterStateService, + RemoteClusterStateService remoteClusterStateService, + PersistedStateRegistry persistedStateRegistry, + RemoteStoreRestoreService remoteStoreRestoreService ) { - assert persistedState.get() == null : "should only start once, but already have " + persistedState.get(); + assert this.persistedStateRegistry == null : "Persisted state registry should only be set once"; + this.persistedStateRegistry = persistedStateRegistry; if (DiscoveryNode.isClusterManagerNode(settings) || DiscoveryNode.isDataNode(settings)) { try { @@ -145,14 +156,45 @@ public void start( } PersistedState persistedState = null; + PersistedState remotePersistedState = null; boolean success = false; try { - final ClusterState clusterState = prepareInitialClusterState( + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)) + .version(lastAcceptedVersion) + .metadata(metadata) + .build(); + + if (DiscoveryNode.isClusterManagerNode(settings) && isRemoteStoreClusterStateEnabled(settings)) { + // If the cluster UUID loaded 
from local is unknown (_na_) then fetch the best state from remote + // If there is no valid state on remote, continue with initial empty state + // If there is a valid state, then restore index metadata using this state + String lastKnownClusterUUID = ClusterState.UNKNOWN_UUID; + if (ClusterState.UNKNOWN_UUID.equals(clusterState.metadata().clusterUUID())) { + lastKnownClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote( + clusterState.getClusterName().value() + ); + if (ClusterState.UNKNOWN_UUID.equals(lastKnownClusterUUID) == false) { + // Load state from remote + final RemoteRestoreResult remoteRestoreResult = remoteStoreRestoreService.restore( + // Remote Metadata should always override local disk Metadata + // if local disk Metadata's cluster uuid is UNKNOWN_UUID + ClusterState.builder(clusterState).metadata(Metadata.EMPTY_METADATA).build(), + lastKnownClusterUUID, + false, + new String[] {} + ); + clusterState = remoteRestoreResult.getClusterState(); + } + } + remotePersistedState = new RemotePersistedState(remoteClusterStateService, lastKnownClusterUUID); + } + + // Recovers Cluster and Index level blocks + clusterState = prepareInitialClusterState( transportService, clusterService, - ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)) - .version(lastAcceptedVersion) - .metadata(upgradeMetadataForNode(metadata, metadataIndexUpgradeService, metadataUpgrader)) + ClusterState.builder(clusterState) + .metadata(upgradeMetadataForNode(clusterState.metadata(), metadataIndexUpgradeService, metadataUpgrader)) .build() ); @@ -178,11 +220,14 @@ public void start( success = true; } finally { if (success == false) { - IOUtils.closeWhileHandlingException(persistedState); + IOUtils.closeWhileHandlingException(persistedStateRegistry); } } - this.persistedState.set(persistedState); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, persistedState); + if (remotePersistedState != null) { + persistedStateRegistry.addPersistedState(PersistedStateType.REMOTE, remotePersistedState); + } } catch (IOException e) { throw new OpenSearchException("failed to load metadata", e); } @@ -209,7 +254,7 @@ public void start( throw new UncheckedIOException(e); } } - persistedState.set(new InMemoryPersistedState(currentTerm, clusterState)); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, new InMemoryPersistedState(currentTerm, clusterState)); } } @@ -235,8 +280,8 @@ Metadata upgradeMetadataForNode( } /** - * This method calls {@link MetadataIndexUpgradeService} to makes sure that indices are compatible with the current - * version. The MetadataIndexUpgradeService might also update obsolete settings if needed. + * This method calls {@link MetadataIndexUpgradeService} to makes sure that indices are compatible with the current version. The MetadataIndexUpgradeService + * might also update obsolete settings if needed. * * @return input metadata if no upgrade is needed or an upgraded metadata */ @@ -328,12 +373,14 @@ public void applyClusterState(ClusterChangedEvent event) { @Override public void close() throws IOException { - IOUtils.close(persistedState.get()); + IOUtils.close(persistedStateRegistry); } // visible for testing public boolean allPendingAsyncStatesWritten() { - final PersistedState ps = persistedState.get(); + // This method is invoked for persisted state implementations which write asynchronously. + // RemotePersistedState is invoked in synchronous path. So this logic is not required for remote state. 
+ final PersistedState ps = persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL); if (ps instanceof AsyncLucenePersistedState) { return ((AsyncLucenePersistedState) ps).allPendingAsyncStatesWritten(); } else { @@ -506,6 +553,9 @@ static class LucenePersistedState implements PersistedState { // out by this version of OpenSearch. TODO TBD should we avoid indexing when possible? final PersistedClusterStateService.Writer writer = persistedClusterStateService.createWriter(); try { + // During remote state restore, there will be non empty metadata getting persisted with cluster UUID as + // ClusterState.UNKOWN_UUID . The valid UUID will be generated and persisted along with the first cluster state getting + // published. writer.writeFullStateAndCommit(currentTerm, lastAcceptedState); } catch (Exception e) { try { @@ -567,6 +617,12 @@ public void setLastAcceptedState(ClusterState clusterState) { lastAcceptedState = clusterState; } + @Override + public PersistedStateStats getStats() { + // Note: These stats are not published yet, will come in future + return null; + } + private PersistedClusterStateService.Writer getWriterSafe() { final PersistedClusterStateService.Writer writer = persistenceWriter.get(); if (writer == null) { @@ -600,4 +656,136 @@ public void close() throws IOException { IOUtils.close(persistenceWriter.getAndSet(null)); } } + + /** + * Encapsulates the writing of metadata to a remote store using {@link RemoteClusterStateService}. + */ + public static class RemotePersistedState implements PersistedState { + + private static final Logger logger = LogManager.getLogger(RemotePersistedState.class); + + private ClusterState lastAcceptedState; + private ClusterMetadataManifest lastAcceptedManifest; + private final RemoteClusterStateService remoteClusterStateService; + private String previousClusterUUID; + + public RemotePersistedState(final RemoteClusterStateService remoteClusterStateService, final String previousClusterUUID) { + this.remoteClusterStateService = remoteClusterStateService; + this.previousClusterUUID = previousClusterUUID; + } + + @Override + public long getCurrentTerm() { + return lastAcceptedState != null ? lastAcceptedState.term() : 0L; + } + + @Override + public ClusterState getLastAcceptedState() { + return lastAcceptedState; + } + + @Override + public void setCurrentTerm(long currentTerm) { + // no-op + // For LucenePersistedState, setCurrentTerm is used only while handling StartJoinRequest by all follower nodes. + // But for RemotePersistedState, the state is only pushed by the active cluster. So this method is not required. + } + + @Override + public void setLastAcceptedState(ClusterState clusterState) { + try { + final ClusterMetadataManifest manifest; + if (shouldWriteFullClusterState(clusterState)) { + final Optional latestManifest = remoteClusterStateService.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); + if (latestManifest.isPresent()) { + // The previous UUID should not change for the current UUID. So fetching the latest manifest + // from remote store and getting the previous UUID. + previousClusterUUID = latestManifest.get().getPreviousClusterUUID(); + } else { + // When the user starts the cluster with remote state disabled but later enables the remote state, + // there will not be any manifest for the current cluster UUID. 
+ logger.error( + "Latest manifest is not present in remote store for cluster UUID: {}", + clusterState.metadata().clusterUUID() + ); + } + manifest = remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID); + } else { + assert verifyManifestAndClusterState(lastAcceptedManifest, lastAcceptedState) == true + : "Previous manifest and previous ClusterState are not in sync"; + manifest = remoteClusterStateService.writeIncrementalMetadata(lastAcceptedState, clusterState, lastAcceptedManifest); + } + assert verifyManifestAndClusterState(manifest, clusterState) == true : "Manifest and ClusterState are not in sync"; + lastAcceptedManifest = manifest; + lastAcceptedState = clusterState; + } catch (Exception e) { + remoteClusterStateService.writeMetadataFailed(); + handleExceptionOnWrite(e); + } + } + + @Override + public PersistedStateStats getStats() { + return remoteClusterStateService.getStats(); + } + + private boolean verifyManifestAndClusterState(ClusterMetadataManifest manifest, ClusterState clusterState) { + assert manifest != null : "ClusterMetadataManifest is null"; + assert clusterState != null : "ClusterState is null"; + assert clusterState.metadata().indices().size() == manifest.getIndices().size() + : "Number of indices in last accepted state and manifest are different"; + manifest.getIndices().stream().forEach(md -> { + assert clusterState.metadata().indices().containsKey(md.getIndexName()) + : "Last accepted state does not contain the index : " + md.getIndexName(); + assert clusterState.metadata().indices().get(md.getIndexName()).getIndexUUID().equals(md.getIndexUUID()) + : "Last accepted state and manifest do not have same UUID for index : " + md.getIndexName(); + }); + return true; + } + + private boolean shouldWriteFullClusterState(ClusterState clusterState) { + if (lastAcceptedState == null + || lastAcceptedManifest == null + || lastAcceptedState.term() != clusterState.term() + || lastAcceptedManifest.getOpensearchVersion() != Version.CURRENT) { + return true; + } + return false; + } + + @Override + public void markLastAcceptedStateAsCommitted() { + try { + assert lastAcceptedState != null : "Last accepted state is not present"; + assert lastAcceptedManifest != null : "Last accepted manifest is not present"; + ClusterState clusterState = lastAcceptedState; + if (lastAcceptedState.metadata().clusterUUID().equals(Metadata.UNKNOWN_CLUSTER_UUID) == false + && lastAcceptedState.metadata().clusterUUIDCommitted() == false) { + Metadata.Builder metadataBuilder = Metadata.builder(lastAcceptedState.metadata()); + metadataBuilder.clusterUUIDCommitted(true); + clusterState = ClusterState.builder(lastAcceptedState).metadata(metadataBuilder).build(); + } + final ClusterMetadataManifest committedManifest = remoteClusterStateService.markLastStateAsCommitted( + clusterState, + lastAcceptedManifest + ); + lastAcceptedManifest = committedManifest; + lastAcceptedState = clusterState; + } catch (Exception e) { + handleExceptionOnWrite(e); + } + } + + @Override + public void close() throws IOException { + remoteClusterStateService.close(); + } + + private void handleExceptionOnWrite(Exception e) { + throw ExceptionsHelper.convertToRuntime(e); + } + } } diff --git a/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java index 8db4736bcdc40..75beb6e29599c 100644 --- a/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java +++ 
b/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java @@ -111,16 +111,16 @@ * Stores cluster metadata in a bare Lucene index (per data path) split across a number of documents. This is used by cluster-manager-eligible nodes * to record the last-accepted cluster state during publication. The metadata is written incrementally where possible, leaving alone any * documents that have not changed. The index has the following fields: - * + *
<p>
          * +------------------------------+-----------------------------+----------------------------------------------+ * | "type" (string field) | "index_uuid" (string field) | "data" (stored binary field in SMILE format) | * +------------------------------+-----------------------------+----------------------------------------------+ * | GLOBAL_TYPE_NAME == "global" | (omitted) | Global metadata | * | INDEX_TYPE_NAME == "index" | Index UUID | Index metadata | * +------------------------------+-----------------------------+----------------------------------------------+ - * + *
<p>
          * Additionally each commit has the following user data: - * + *
<p>
          * +---------------------------+-------------------------+-------------------------------------------------------------------------------+ * | Key symbol | Key literal | Value | * +---------------------------+-------------------------+-------------------------------------------------------------------------------+ @@ -129,7 +129,7 @@ * | NODE_ID_KEY | "node_id" | The (persistent) ID of the node that wrote this metadata | * | NODE_VERSION_KEY | "node_version" | The (ID of the) version of the node that wrote this metadata | * +---------------------------+-------------------------+-------------------------------------------------------------------------------+ - * + *
<p>
          * (the last-accepted term is recorded in Metadata → CoordinationMetadata so does not need repeating here) * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java index 4dc9396751fc9..2807be00feeaa 100644 --- a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java @@ -69,7 +69,7 @@ * that holds a copy of the shard. The shard metadata from each node is compared against the * set of valid allocation IDs and for all valid shard copies (if any), the primary shard allocator * executes the allocation deciders to chose a copy to assign the primary shard to. - * + *
<p>
          * Note that the PrimaryShardAllocator does *not* allocate primaries on index creation * (see {@link org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator}), * nor does it allocate primaries when a primary shard failed and there is a valid replica @@ -386,11 +386,11 @@ protected static NodeShardsResult buildNodeShardsResult( } } - /** - * Orders the active shards copies based on below comparators - * 1. No store exception i.e. shard copy is readable - * 2. Prefer previous primary shard - * 3. Prefer shard copy with the highest replication checkpoint. It is NO-OP for doc rep enabled indices. + /* + Orders the active shards copies based on below comparators + 1. No store exception i.e. shard copy is readable + 2. Prefer previous primary shard + 3. Prefer shard copy with the highest replication checkpoint. It is NO-OP for doc rep enabled indices. */ final Comparator comparator; // allocation preference if (matchAnyShard) { diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java index a6c9319b6dc54..f530052c5bcd1 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java @@ -295,7 +295,7 @@ public AllocateUnassignedDecision makeAllocationDecision( /** * Determines if the shard can be allocated on at least one node based on the allocation deciders. - * + *
<p>
          * Returns the best allocation decision for allocating the shard on any node (i.e. YES if at least one * node decided YES, THROTTLE if at least one node decided THROTTLE, and NO if none of the nodes decided * YES or THROTTLE). If in explain mode, also returns the node-level explanations as the second element diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java new file mode 100644 index 0000000000000..4725f40076ce2 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterMetadataManifest.java @@ -0,0 +1,616 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote; + +import org.opensearch.Version; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.Strings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * Manifest file which contains the details of the uploaded entity metadata + * + * @opensearch.internal + */ +public class ClusterMetadataManifest implements Writeable, ToXContentFragment { + + public static final int CODEC_V0 = 0; // Older codec version, where we haven't introduced codec versions for manifest. + public static final int CODEC_V1 = 1; // In Codec V1 we have introduced global-metadata and codec version in Manifest file. 
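The two codec constants above correspond to the two parsers (PARSER_V0 and PARSER_V1) declared further down: manifests written before CODEC_V1 carry neither the codec_version nor the global_metadata field, so they must be read with the V0 parser. Below is a minimal sketch of codec-aware parsing, assuming a hypothetical helper named parseForCodec; the shipped code instead exposes fromXContent and fromXContentV0, with the codec version recoverable from the manifest file name (its last token, per the SPLITED_MANIFEST_FILE_LENGTH comment in RemoteClusterStateService):

    static ClusterMetadataManifest parseForCodec(XContentParser parser, int codecVersion) throws IOException {
        // Manifests older than CODEC_V1 lack the codec_version/global_metadata fields, so fall back to the V0 parser.
        return codecVersion >= CODEC_V1 ? fromXContent(parser) : fromXContentV0(parser);
    }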
+ + private static final ParseField CLUSTER_TERM_FIELD = new ParseField("cluster_term"); + private static final ParseField STATE_VERSION_FIELD = new ParseField("state_version"); + private static final ParseField CLUSTER_UUID_FIELD = new ParseField("cluster_uuid"); + private static final ParseField STATE_UUID_FIELD = new ParseField("state_uuid"); + private static final ParseField OPENSEARCH_VERSION_FIELD = new ParseField("opensearch_version"); + private static final ParseField NODE_ID_FIELD = new ParseField("node_id"); + private static final ParseField COMMITTED_FIELD = new ParseField("committed"); + private static final ParseField CODEC_VERSION_FIELD = new ParseField("codec_version"); + private static final ParseField GLOBAL_METADATA_FIELD = new ParseField("global_metadata"); + private static final ParseField INDICES_FIELD = new ParseField("indices"); + private static final ParseField PREVIOUS_CLUSTER_UUID = new ParseField("previous_cluster_uuid"); + private static final ParseField CLUSTER_UUID_COMMITTED = new ParseField("cluster_uuid_committed"); + + private static long term(Object[] fields) { + return (long) fields[0]; + } + + private static long version(Object[] fields) { + return (long) fields[1]; + } + + private static String clusterUUID(Object[] fields) { + return (String) fields[2]; + } + + private static String stateUUID(Object[] fields) { + return (String) fields[3]; + } + + private static Version opensearchVersion(Object[] fields) { + return Version.fromId((int) fields[4]); + } + + private static String nodeId(Object[] fields) { + return (String) fields[5]; + } + + private static boolean committed(Object[] fields) { + return (boolean) fields[6]; + } + + private static List indices(Object[] fields) { + return (List) fields[7]; + } + + private static String previousClusterUUID(Object[] fields) { + return (String) fields[8]; + } + + private static boolean clusterUUIDCommitted(Object[] fields) { + return (boolean) fields[9]; + } + + private static int codecVersion(Object[] fields) { + return (int) fields[10]; + } + + private static String globalMetadataFileName(Object[] fields) { + return (String) fields[11]; + } + + private static final ConstructingObjectParser PARSER_V0 = new ConstructingObjectParser<>( + "cluster_metadata_manifest", + fields -> new ClusterMetadataManifest( + term(fields), + version(fields), + clusterUUID(fields), + stateUUID(fields), + opensearchVersion(fields), + nodeId(fields), + committed(fields), + CODEC_V0, + null, + indices(fields), + previousClusterUUID(fields), + clusterUUIDCommitted(fields) + ) + ); + + private static final ConstructingObjectParser PARSER_V1 = new ConstructingObjectParser<>( + "cluster_metadata_manifest", + fields -> new ClusterMetadataManifest( + term(fields), + version(fields), + clusterUUID(fields), + stateUUID(fields), + opensearchVersion(fields), + nodeId(fields), + committed(fields), + codecVersion(fields), + globalMetadataFileName(fields), + indices(fields), + previousClusterUUID(fields), + clusterUUIDCommitted(fields) + ) + ); + + private static final ConstructingObjectParser CURRENT_PARSER = PARSER_V1; + + static { + declareParser(PARSER_V0, CODEC_V0); + declareParser(PARSER_V1, CODEC_V1); + } + + private static void declareParser(ConstructingObjectParser parser, long codec_version) { + parser.declareLong(ConstructingObjectParser.constructorArg(), CLUSTER_TERM_FIELD); + parser.declareLong(ConstructingObjectParser.constructorArg(), STATE_VERSION_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), 
CLUSTER_UUID_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), STATE_UUID_FIELD); + parser.declareInt(ConstructingObjectParser.constructorArg(), OPENSEARCH_VERSION_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), NODE_ID_FIELD); + parser.declareBoolean(ConstructingObjectParser.constructorArg(), COMMITTED_FIELD); + parser.declareObjectArray( + ConstructingObjectParser.constructorArg(), + (p, c) -> UploadedIndexMetadata.fromXContent(p), + INDICES_FIELD + ); + parser.declareString(ConstructingObjectParser.constructorArg(), PREVIOUS_CLUSTER_UUID); + parser.declareBoolean(ConstructingObjectParser.constructorArg(), CLUSTER_UUID_COMMITTED); + + if (codec_version >= CODEC_V1) { + parser.declareInt(ConstructingObjectParser.constructorArg(), CODEC_VERSION_FIELD); + parser.declareString(ConstructingObjectParser.constructorArg(), GLOBAL_METADATA_FIELD); + } + } + + private final int codecVersion; + private final String globalMetadataFileName; + private final List indices; + private final long clusterTerm; + private final long stateVersion; + private final String clusterUUID; + private final String stateUUID; + private final Version opensearchVersion; + private final String nodeId; + private final boolean committed; + private final String previousClusterUUID; + private final boolean clusterUUIDCommitted; + + public List getIndices() { + return indices; + } + + public long getClusterTerm() { + return clusterTerm; + } + + public long getStateVersion() { + return stateVersion; + } + + public String getClusterUUID() { + return clusterUUID; + } + + public String getStateUUID() { + return stateUUID; + } + + public Version getOpensearchVersion() { + return opensearchVersion; + } + + public String getNodeId() { + return nodeId; + } + + public boolean isCommitted() { + return committed; + } + + public String getPreviousClusterUUID() { + return previousClusterUUID; + } + + public boolean isClusterUUIDCommitted() { + return clusterUUIDCommitted; + } + + public int getCodecVersion() { + return codecVersion; + } + + public String getGlobalMetadataFileName() { + return globalMetadataFileName; + } + + public ClusterMetadataManifest( + long clusterTerm, + long version, + String clusterUUID, + String stateUUID, + Version opensearchVersion, + String nodeId, + boolean committed, + int codecVersion, + String globalMetadataFileName, + List indices, + String previousClusterUUID, + boolean clusterUUIDCommitted + ) { + this.clusterTerm = clusterTerm; + this.stateVersion = version; + this.clusterUUID = clusterUUID; + this.stateUUID = stateUUID; + this.opensearchVersion = opensearchVersion; + this.nodeId = nodeId; + this.committed = committed; + this.codecVersion = codecVersion; + this.globalMetadataFileName = globalMetadataFileName; + this.indices = Collections.unmodifiableList(indices); + this.previousClusterUUID = previousClusterUUID; + this.clusterUUIDCommitted = clusterUUIDCommitted; + } + + public ClusterMetadataManifest(StreamInput in) throws IOException { + this.clusterTerm = in.readVLong(); + this.stateVersion = in.readVLong(); + this.clusterUUID = in.readString(); + this.stateUUID = in.readString(); + this.opensearchVersion = Version.fromId(in.readInt()); + this.nodeId = in.readString(); + this.committed = in.readBoolean(); + this.indices = Collections.unmodifiableList(in.readList(UploadedIndexMetadata::new)); + this.previousClusterUUID = in.readString(); + this.clusterUUIDCommitted = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + 
this.codecVersion = in.readInt(); + this.globalMetadataFileName = in.readString(); + } else { + this.codecVersion = CODEC_V0; // Default codec + this.globalMetadataFileName = null; + } + } + + public static Builder builder() { + return new Builder(); + } + + public static Builder builder(ClusterMetadataManifest manifest) { + return new Builder(manifest); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(CLUSTER_TERM_FIELD.getPreferredName(), getClusterTerm()) + .field(STATE_VERSION_FIELD.getPreferredName(), getStateVersion()) + .field(CLUSTER_UUID_FIELD.getPreferredName(), getClusterUUID()) + .field(STATE_UUID_FIELD.getPreferredName(), getStateUUID()) + .field(OPENSEARCH_VERSION_FIELD.getPreferredName(), getOpensearchVersion().id) + .field(NODE_ID_FIELD.getPreferredName(), getNodeId()) + .field(COMMITTED_FIELD.getPreferredName(), isCommitted()); + builder.startArray(INDICES_FIELD.getPreferredName()); + { + for (UploadedIndexMetadata uploadedIndexMetadata : indices) { + uploadedIndexMetadata.toXContent(builder, params); + } + } + builder.endArray(); + builder.field(PREVIOUS_CLUSTER_UUID.getPreferredName(), getPreviousClusterUUID()); + builder.field(CLUSTER_UUID_COMMITTED.getPreferredName(), isClusterUUIDCommitted()); + if (onOrAfterCodecVersion(CODEC_V1)) { + builder.field(CODEC_VERSION_FIELD.getPreferredName(), getCodecVersion()); + builder.field(GLOBAL_METADATA_FIELD.getPreferredName(), getGlobalMetadataFileName()); + } + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(clusterTerm); + out.writeVLong(stateVersion); + out.writeString(clusterUUID); + out.writeString(stateUUID); + out.writeInt(opensearchVersion.id); + out.writeString(nodeId); + out.writeBoolean(committed); + out.writeCollection(indices); + out.writeString(previousClusterUUID); + out.writeBoolean(clusterUUIDCommitted); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeInt(codecVersion); + out.writeString(globalMetadataFileName); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ClusterMetadataManifest that = (ClusterMetadataManifest) o; + return Objects.equals(indices, that.indices) + && clusterTerm == that.clusterTerm + && stateVersion == that.stateVersion + && Objects.equals(clusterUUID, that.clusterUUID) + && Objects.equals(stateUUID, that.stateUUID) + && Objects.equals(opensearchVersion, that.opensearchVersion) + && Objects.equals(nodeId, that.nodeId) + && Objects.equals(committed, that.committed) + && Objects.equals(previousClusterUUID, that.previousClusterUUID) + && Objects.equals(clusterUUIDCommitted, that.clusterUUIDCommitted) + && Objects.equals(globalMetadataFileName, that.globalMetadataFileName) + && Objects.equals(codecVersion, that.codecVersion); + } + + @Override + public int hashCode() { + return Objects.hash( + codecVersion, + globalMetadataFileName, + indices, + clusterTerm, + stateVersion, + clusterUUID, + stateUUID, + opensearchVersion, + nodeId, + committed, + previousClusterUUID, + clusterUUIDCommitted + ); + } + + @Override + public String toString() { + return Strings.toString(MediaTypeRegistry.JSON, this); + } + + public boolean onOrAfterCodecVersion(int codecVersion) { + return this.codecVersion >= codecVersion; + } + + public static ClusterMetadataManifest fromXContentV0(XContentParser parser) throws IOException { + return 
PARSER_V0.parse(parser, null); + } + + public static ClusterMetadataManifest fromXContent(XContentParser parser) throws IOException { + return CURRENT_PARSER.parse(parser, null); + } + + /** + * Builder for ClusterMetadataManifest + * + * @opensearch.internal + */ + public static class Builder { + + private String globalMetadataFileName; + private int codecVersion; + private List indices; + private long clusterTerm; + private long stateVersion; + private String clusterUUID; + private String stateUUID; + private Version opensearchVersion; + private String nodeId; + private String previousClusterUUID; + private boolean committed; + private boolean clusterUUIDCommitted; + + public Builder indices(List indices) { + this.indices = indices; + return this; + } + + public Builder codecVersion(int codecVersion) { + this.codecVersion = codecVersion; + return this; + } + + public Builder globalMetadataFileName(String globalMetadataFileName) { + this.globalMetadataFileName = globalMetadataFileName; + return this; + } + + public Builder clusterTerm(long clusterTerm) { + this.clusterTerm = clusterTerm; + return this; + } + + public Builder stateVersion(long stateVersion) { + this.stateVersion = stateVersion; + return this; + } + + public Builder clusterUUID(String clusterUUID) { + this.clusterUUID = clusterUUID; + return this; + } + + public Builder stateUUID(String stateUUID) { + this.stateUUID = stateUUID; + return this; + } + + public Builder opensearchVersion(Version opensearchVersion) { + this.opensearchVersion = opensearchVersion; + return this; + } + + public Builder nodeId(String nodeId) { + this.nodeId = nodeId; + return this; + } + + public Builder committed(boolean committed) { + this.committed = committed; + return this; + } + + public List getIndices() { + return indices; + } + + public Builder previousClusterUUID(String previousClusterUUID) { + this.previousClusterUUID = previousClusterUUID; + return this; + } + + public Builder clusterUUIDCommitted(boolean clusterUUIDCommitted) { + this.clusterUUIDCommitted = clusterUUIDCommitted; + return this; + } + + public Builder() { + indices = new ArrayList<>(); + } + + public Builder(ClusterMetadataManifest manifest) { + this.clusterTerm = manifest.clusterTerm; + this.stateVersion = manifest.stateVersion; + this.clusterUUID = manifest.clusterUUID; + this.stateUUID = manifest.stateUUID; + this.opensearchVersion = manifest.opensearchVersion; + this.nodeId = manifest.nodeId; + this.committed = manifest.committed; + this.globalMetadataFileName = manifest.globalMetadataFileName; + this.codecVersion = manifest.codecVersion; + this.indices = new ArrayList<>(manifest.indices); + this.previousClusterUUID = manifest.previousClusterUUID; + this.clusterUUIDCommitted = manifest.clusterUUIDCommitted; + } + + public ClusterMetadataManifest build() { + return new ClusterMetadataManifest( + clusterTerm, + stateVersion, + clusterUUID, + stateUUID, + opensearchVersion, + nodeId, + committed, + codecVersion, + globalMetadataFileName, + indices, + previousClusterUUID, + clusterUUIDCommitted + ); + } + + } + + /** + * Metadata for uploaded index metadata + * + * @opensearch.internal + */ + public static class UploadedIndexMetadata implements Writeable, ToXContentFragment { + + private static final ParseField INDEX_NAME_FIELD = new ParseField("index_name"); + private static final ParseField INDEX_UUID_FIELD = new ParseField("index_uuid"); + private static final ParseField UPLOADED_FILENAME_FIELD = new ParseField("uploaded_filename"); + + private static String 
indexName(Object[] fields) { + return (String) fields[0]; + } + + private static String indexUUID(Object[] fields) { + return (String) fields[1]; + } + + private static String uploadedFilename(Object[] fields) { + return (String) fields[2]; + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "uploaded_index_metadata", + fields -> new UploadedIndexMetadata(indexName(fields), indexUUID(fields), uploadedFilename(fields)) + ); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_NAME_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_UUID_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), UPLOADED_FILENAME_FIELD); + } + + private final String indexName; + private final String indexUUID; + private final String uploadedFilename; + + public UploadedIndexMetadata(String indexName, String indexUUID, String uploadedFileName) { + this.indexName = indexName; + this.indexUUID = indexUUID; + this.uploadedFilename = uploadedFileName; + } + + public UploadedIndexMetadata(StreamInput in) throws IOException { + this.indexName = in.readString(); + this.indexUUID = in.readString(); + this.uploadedFilename = in.readString(); + } + + public String getUploadedFilePath() { + return uploadedFilename; + } + + public String getUploadedFilename() { + String[] splitPath = uploadedFilename.split("/"); + return splitPath[splitPath.length - 1]; + } + + public String getIndexName() { + return indexName; + } + + public String getIndexUUID() { + return indexUUID; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field(INDEX_NAME_FIELD.getPreferredName(), getIndexName()) + .field(INDEX_UUID_FIELD.getPreferredName(), getIndexUUID()) + .field(UPLOADED_FILENAME_FIELD.getPreferredName(), getUploadedFilePath()) + .endObject(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(indexName); + out.writeString(indexUUID); + out.writeString(uploadedFilename); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final UploadedIndexMetadata that = (UploadedIndexMetadata) o; + return Objects.equals(indexName, that.indexName) + && Objects.equals(indexUUID, that.indexUUID) + && Objects.equals(uploadedFilename, that.uploadedFilename); + } + + @Override + public int hashCode() { + return Objects.hash(indexName, indexUUID, uploadedFilename); + } + + @Override + public String toString() { + return Strings.toString(MediaTypeRegistry.JSON, this); + } + + public static UploadedIndexMetadata fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java new file mode 100644 index 0000000000000..c892b475d71da --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -0,0 +1,1335 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.Version; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.common.Nullable; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat; +import org.opensearch.threadpool.ThreadPool; + +import java.io.Closeable; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.function.LongSupplier; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.opensearch.gateway.PersistedClusterStateService.SLOW_WRITE_LOGGING_THRESHOLD; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; + +/** + * A Service which provides APIs to upload and download cluster metadata from remote store. 
+ * + * @opensearch.internal + */ +public class RemoteClusterStateService implements Closeable { + + public static final String METADATA_NAME_FORMAT = "%s.dat"; + + public static final String METADATA_MANIFEST_NAME_FORMAT = "%s"; + + public static final int RETAINED_MANIFESTS = 10; + + public static final String DELIMITER = "__"; + + private static final Logger logger = LogManager.getLogger(RemoteClusterStateService.class); + + public static final TimeValue INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + + public static final TimeValue GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + + public static final TimeValue METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT = TimeValue.timeValueMillis(20000); + + public static final Setting INDEX_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.index_metadata.upload_timeout", + INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.global_metadata.upload_timeout", + GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.state.metadata_manifest.upload_timeout", + METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final ChecksumBlobStoreFormat INDEX_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( + "index-metadata", + METADATA_NAME_FORMAT, + IndexMetadata::fromXContent + ); + + public static final ChecksumBlobStoreFormat GLOBAL_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( + "metadata", + METADATA_NAME_FORMAT, + Metadata::fromXContent + ); + + /** + * Manifest format compatible with older codec v0, where codec version was missing. + */ + public static final ChecksumBlobStoreFormat CLUSTER_METADATA_MANIFEST_FORMAT_V0 = + new ChecksumBlobStoreFormat<>("cluster-metadata-manifest", METADATA_MANIFEST_NAME_FORMAT, ClusterMetadataManifest::fromXContentV0); + + /** + * Manifest format compatible with codec v1, where we introduced codec versions/global metadata. 
+ */ + public static final ChecksumBlobStoreFormat CLUSTER_METADATA_MANIFEST_FORMAT = new ChecksumBlobStoreFormat<>( + "cluster-metadata-manifest", + METADATA_MANIFEST_NAME_FORMAT, + ClusterMetadataManifest::fromXContent + ); + + /** + * Used to specify if cluster state metadata should be published to remote store + */ + public static final Setting REMOTE_CLUSTER_STATE_ENABLED_SETTING = Setting.boolSetting( + "cluster.remote_store.state.enabled", + false, + Property.NodeScope, + Property.Final + ); + + public static final String CLUSTER_STATE_PATH_TOKEN = "cluster-state"; + public static final String INDEX_PATH_TOKEN = "index"; + public static final String GLOBAL_METADATA_PATH_TOKEN = "global-metadata"; + public static final String MANIFEST_PATH_TOKEN = "manifest"; + public static final String MANIFEST_FILE_PREFIX = "manifest"; + public static final String METADATA_FILE_PREFIX = "metadata"; + public static final int SPLITED_MANIFEST_FILE_LENGTH = 6; // file name manifest__term__version__C/P__timestamp__codecversion + + private final String nodeId; + private final Supplier repositoriesService; + private final Settings settings; + private final LongSupplier relativeTimeNanosSupplier; + private final ThreadPool threadpool; + private BlobStoreRepository blobStoreRepository; + private BlobStoreTransferService blobStoreTransferService; + private volatile TimeValue slowWriteLoggingThreshold; + + private volatile TimeValue indexMetadataUploadTimeout; + private volatile TimeValue globalMetadataUploadTimeout; + private volatile TimeValue metadataManifestUploadTimeout; + + private final AtomicBoolean deleteStaleMetadataRunning = new AtomicBoolean(false); + private final RemotePersistenceStats remoteStateStats; + public static final int INDEX_METADATA_CURRENT_CODEC_VERSION = 1; + public static final int MANIFEST_CURRENT_CODEC_VERSION = ClusterMetadataManifest.CODEC_V1; + public static final int GLOBAL_METADATA_CURRENT_CODEC_VERSION = 1; + + // ToXContent Params with gateway mode. + // We are using gateway context mode to persist all custom metadata. 
+ public static final ToXContent.Params FORMAT_PARAMS; + static { + Map params = new HashMap<>(1); + params.put(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_GATEWAY); + FORMAT_PARAMS = new ToXContent.MapParams(params); + } + + public RemoteClusterStateService( + String nodeId, + Supplier repositoriesService, + Settings settings, + ClusterSettings clusterSettings, + LongSupplier relativeTimeNanosSupplier, + ThreadPool threadPool + ) { + assert isRemoteStoreClusterStateEnabled(settings) : "Remote cluster state is not enabled"; + this.nodeId = nodeId; + this.repositoriesService = repositoriesService; + this.settings = settings; + this.relativeTimeNanosSupplier = relativeTimeNanosSupplier; + this.threadpool = threadPool; + this.slowWriteLoggingThreshold = clusterSettings.get(SLOW_WRITE_LOGGING_THRESHOLD); + this.indexMetadataUploadTimeout = clusterSettings.get(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING); + this.globalMetadataUploadTimeout = clusterSettings.get(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING); + this.metadataManifestUploadTimeout = clusterSettings.get(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING); + clusterSettings.addSettingsUpdateConsumer(SLOW_WRITE_LOGGING_THRESHOLD, this::setSlowWriteLoggingThreshold); + clusterSettings.addSettingsUpdateConsumer(INDEX_METADATA_UPLOAD_TIMEOUT_SETTING, this::setIndexMetadataUploadTimeout); + clusterSettings.addSettingsUpdateConsumer(GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING, this::setGlobalMetadataUploadTimeout); + clusterSettings.addSettingsUpdateConsumer(METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, this::setMetadataManifestUploadTimeout); + this.remoteStateStats = new RemotePersistenceStats(); + } + + private BlobStoreTransferService getBlobStoreTransferService() { + if (blobStoreTransferService == null) { + blobStoreTransferService = new BlobStoreTransferService(blobStoreRepository.blobStore(), threadpool); + } + return blobStoreTransferService; + } + + /** + * This method uploads entire cluster state metadata to the configured blob store. For now only index metadata upload is supported. This method should be + * invoked by the elected cluster manager when the remote cluster state is enabled. + * + * @return A manifest object which contains the details of uploaded entity metadata. + */ + @Nullable + public ClusterMetadataManifest writeFullMetadata(ClusterState clusterState, String previousClusterUUID) throws IOException { + final long startTimeNanos = relativeTimeNanosSupplier.getAsLong(); + if (clusterState.nodes().isLocalNodeElectedClusterManager() == false) { + logger.error("Local node is not elected cluster manager. Exiting"); + return null; + } + + // TODO: we can upload global metadata and index metadata in parallel. [issue: #10645] + // Write globalMetadata + String globalMetadataFile = writeGlobalMetadata(clusterState); + + // any validations before/after upload ? 
+ final List allUploadedIndexMetadata = writeIndexMetadataParallel( + clusterState, + new ArrayList<>(clusterState.metadata().indices().values()) + ); + final ClusterMetadataManifest manifest = uploadManifest( + clusterState, + allUploadedIndexMetadata, + previousClusterUUID, + globalMetadataFile, + false + ); + final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos); + remoteStateStats.stateSucceeded(); + remoteStateStats.stateTook(durationMillis); + if (durationMillis >= slowWriteLoggingThreshold.getMillis()) { + logger.warn( + "writing cluster state took [{}ms] which is above the warn threshold of [{}]; " + "wrote full state with [{}] indices", + durationMillis, + slowWriteLoggingThreshold, + allUploadedIndexMetadata.size() + ); + } else { + logger.info( + "writing cluster state took [{}ms]; " + "wrote full state with [{}] indices and global metadata", + durationMillis, + allUploadedIndexMetadata.size() + ); + } + return manifest; + } + + /** + * This method uploads the diff between the previous cluster state and the current cluster state. The previous manifest file is needed to create the new + * manifest. The new manifest file is created by using the unchanged metadata from the previous manifest and the new metadata changes from the current + * cluster state. + * + * @return The uploaded ClusterMetadataManifest file + */ + @Nullable + public ClusterMetadataManifest writeIncrementalMetadata( + ClusterState previousClusterState, + ClusterState clusterState, + ClusterMetadataManifest previousManifest + ) throws IOException { + final long startTimeNanos = relativeTimeNanosSupplier.getAsLong(); + if (clusterState.nodes().isLocalNodeElectedClusterManager() == false) { + logger.error("Local node is not elected cluster manager. Exiting"); + return null; + } + assert previousClusterState.metadata().coordinationMetadata().term() == clusterState.metadata().coordinationMetadata().term(); + + // Write Global Metadata + final boolean updateGlobalMetadata = Metadata.isGlobalStateEquals( + previousClusterState.metadata(), + clusterState.metadata() + ) == false; + String globalMetadataFile; + // For migration case from codec V0 to V1, we have added null check on global metadata file, + // If file is empty and codec is 1 then write global metadata. 
+ if (updateGlobalMetadata || previousManifest.getGlobalMetadataFileName() == null) { + globalMetadataFile = writeGlobalMetadata(clusterState); + } else { + logger.debug("Global metadata has not updated in cluster state, skipping upload of it"); + globalMetadataFile = previousManifest.getGlobalMetadataFileName(); + } + + // Write Index Metadata + final Map previousStateIndexMetadataVersionByName = new HashMap<>(); + for (final IndexMetadata indexMetadata : previousClusterState.metadata().indices().values()) { + previousStateIndexMetadataVersionByName.put(indexMetadata.getIndex().getName(), indexMetadata.getVersion()); + } + + int numIndicesUpdated = 0; + int numIndicesUnchanged = 0; + final Map allUploadedIndexMetadata = previousManifest.getIndices() + .stream() + .collect(Collectors.toMap(UploadedIndexMetadata::getIndexName, Function.identity())); + + List toUpload = new ArrayList<>(); + + for (final IndexMetadata indexMetadata : clusterState.metadata().indices().values()) { + final Long previousVersion = previousStateIndexMetadataVersionByName.get(indexMetadata.getIndex().getName()); + if (previousVersion == null || indexMetadata.getVersion() != previousVersion) { + logger.debug( + "updating metadata for [{}], changing version from [{}] to [{}]", + indexMetadata.getIndex(), + previousVersion, + indexMetadata.getVersion() + ); + numIndicesUpdated++; + toUpload.add(indexMetadata); + } else { + numIndicesUnchanged++; + } + previousStateIndexMetadataVersionByName.remove(indexMetadata.getIndex().getName()); + } + + List uploadedIndexMetadataList = writeIndexMetadataParallel(clusterState, toUpload); + uploadedIndexMetadataList.forEach( + uploadedIndexMetadata -> allUploadedIndexMetadata.put(uploadedIndexMetadata.getIndexName(), uploadedIndexMetadata) + ); + + for (String removedIndexName : previousStateIndexMetadataVersionByName.keySet()) { + allUploadedIndexMetadata.remove(removedIndexName); + } + final ClusterMetadataManifest manifest = uploadManifest( + clusterState, + new ArrayList<>(allUploadedIndexMetadata.values()), + previousManifest.getPreviousClusterUUID(), + globalMetadataFile, + false + ); + deleteStaleClusterMetadata(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID(), RETAINED_MANIFESTS); + + final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos); + remoteStateStats.stateSucceeded(); + remoteStateStats.stateTook(durationMillis); + if (durationMillis >= slowWriteLoggingThreshold.getMillis()) { + logger.warn( + "writing cluster state took [{}ms] which is above the warn threshold of [{}]; " + + "wrote metadata for [{}] indices and skipped [{}] unchanged indices, global metadata updated : [{}]", + durationMillis, + slowWriteLoggingThreshold, + numIndicesUpdated, + numIndicesUnchanged, + updateGlobalMetadata + ); + } else { + logger.info( + "writing cluster state for version [{}] took [{}ms]; " + + "wrote metadata for [{}] indices and skipped [{}] unchanged indices, global metadata updated : [{}]", + manifest.getStateVersion(), + durationMillis, + numIndicesUpdated, + numIndicesUnchanged, + updateGlobalMetadata + ); + } + return manifest; + } + + /** + * Uploads provided ClusterState's global Metadata to remote store in parallel. + * The call is blocking so the method waits for upload to finish and then return. + * + * @param clusterState current ClusterState + * @return String file name where globalMetadata file is stored. 
+ */ + private String writeGlobalMetadata(ClusterState clusterState) throws IOException { + + AtomicReference<String> result = new AtomicReference<>(); + AtomicReference<Exception> exceptionReference = new AtomicReference<>(); + + final BlobContainer globalMetadataContainer = globalMetadataContainer( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); + final String globalMetadataFilename = globalMetadataFileName(clusterState.metadata()); + + // latch to wait until the upload has finished + CountDownLatch latch = new CountDownLatch(1); + + LatchedActionListener<Void> completionListener = new LatchedActionListener<>(ActionListener.wrap(resp -> { + logger.trace("GlobalMetadata uploaded successfully."); + result.set(globalMetadataContainer.path().buildAsString() + globalMetadataFilename); + }, ex -> { exceptionReference.set(ex); }), latch); + + GLOBAL_METADATA_FORMAT.writeAsyncWithUrgentPriority( + clusterState.metadata(), + globalMetadataContainer, + globalMetadataFilename, + blobStoreRepository.getCompressor(), + completionListener, + FORMAT_PARAMS + ); + + try { + if (latch.await(getGlobalMetadataUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { + // TODO: We should add metrics where transfer is timing out. [Issue: #10687] + RemoteStateTransferException ex = new RemoteStateTransferException( + String.format(Locale.ROOT, "Timed out waiting for transfer of global metadata to complete") + ); + throw ex; + } + } catch (InterruptedException ex) { + RemoteStateTransferException exception = new RemoteStateTransferException( + String.format(Locale.ROOT, "Interrupted while waiting for transfer of global metadata to complete"), + ex + ); + Thread.currentThread().interrupt(); + throw exception; + } + if (exceptionReference.get() != null) { + throw new RemoteStateTransferException(exceptionReference.get().getMessage(), exceptionReference.get()); + } + return result.get(); + } + + /** + * Uploads provided IndexMetadata instances to remote store in parallel. The call is blocking, so the method waits for the uploads to finish and then returns.
+ * + * @param clusterState current ClusterState + * @param toUpload list of IndexMetadata to upload + * @return {@code List<UploadedIndexMetadata>} list of IndexMetadata uploaded to remote + */ + private List<UploadedIndexMetadata> writeIndexMetadataParallel(ClusterState clusterState, List<IndexMetadata> toUpload) + throws IOException { + List<Exception> exceptionList = Collections.synchronizedList(new ArrayList<>(toUpload.size())); + final CountDownLatch latch = new CountDownLatch(toUpload.size()); + List<UploadedIndexMetadata> result = new ArrayList<>(toUpload.size()); + + LatchedActionListener<UploadedIndexMetadata> latchedActionListener = new LatchedActionListener<>( + ActionListener.wrap((UploadedIndexMetadata uploadedIndexMetadata) -> { + logger.trace( + String.format(Locale.ROOT, "IndexMetadata uploaded successfully for %s", uploadedIndexMetadata.getIndexName()) + ); + result.add(uploadedIndexMetadata); + }, ex -> { + assert ex instanceof RemoteStateTransferException; + logger.error( + () -> new ParameterizedMessage("Exception during transfer of IndexMetadata to Remote {}", ex.getMessage()), + ex + ); + exceptionList.add(ex); + }), + latch + ); + + for (IndexMetadata indexMetadata : toUpload) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index/ftqsCnn9TgOX/metadata_4_1690947200 + writeIndexMetadataAsync(clusterState, indexMetadata, latchedActionListener); + } + + try { + if (latch.await(getIndexMetadataUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { + RemoteStateTransferException ex = new RemoteStateTransferException( + String.format( + Locale.ROOT, + "Timed out waiting for transfer of index metadata to complete - %s", + toUpload.stream().map(IndexMetadata::getIndex).map(Index::toString).collect(Collectors.joining("")) + ) + ); + exceptionList.forEach(ex::addSuppressed); + throw ex; + } + } catch (InterruptedException ex) { + exceptionList.forEach(ex::addSuppressed); + RemoteStateTransferException exception = new RemoteStateTransferException( + String.format( + Locale.ROOT, + "Interrupted while waiting for transfer of index metadata to complete - %s", + toUpload.stream().map(IndexMetadata::getIndex).map(Index::toString).collect(Collectors.joining("")) + ), + ex + ); + Thread.currentThread().interrupt(); + throw exception; + } + if (exceptionList.size() > 0) { + RemoteStateTransferException exception = new RemoteStateTransferException( + String.format( + Locale.ROOT, + "Exception during transfer of IndexMetadata to Remote %s", + toUpload.stream().map(IndexMetadata::getIndex).map(Index::toString).collect(Collectors.joining("")) + ) + ); + exceptionList.forEach(exception::addSuppressed); + throw exception; + } + return result; + } + + /** + * Allows async upload of IndexMetadata to remote + * + * @param clusterState current ClusterState + * @param indexMetadata {@link IndexMetadata} to upload + * @param latchedActionListener listener to respond back on after upload finishes + */ + private void writeIndexMetadataAsync( + ClusterState clusterState, + IndexMetadata indexMetadata, + LatchedActionListener<UploadedIndexMetadata> latchedActionListener + ) throws IOException { + final BlobContainer indexMetadataContainer = indexMetadataContainer( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID(), + indexMetadata.getIndexUUID() + ); + final String indexMetadataFilename = indexMetadataFileName(indexMetadata); + ActionListener<Void> completionListener = ActionListener.wrap( + resp -> latchedActionListener.onResponse( + new UploadedIndexMetadata( + indexMetadata.getIndex().getName(), + indexMetadata.getIndexUUID(), + indexMetadataContainer.path().buildAsString() + indexMetadataFilename + ) +
), + ex -> latchedActionListener.onFailure(new RemoteStateTransferException(indexMetadata.getIndex().toString(), ex)) + ); + + INDEX_METADATA_FORMAT.writeAsyncWithUrgentPriority( + indexMetadata, + indexMetadataContainer, + indexMetadataFilename, + blobStoreRepository.getCompressor(), + completionListener, + FORMAT_PARAMS + ); + } + + @Nullable + public ClusterMetadataManifest markLastStateAsCommitted(ClusterState clusterState, ClusterMetadataManifest previousManifest) + throws IOException { + assert clusterState != null : "Last accepted cluster state is not set"; + if (clusterState.nodes().isLocalNodeElectedClusterManager() == false) { + logger.error("Local node is not elected cluster manager. Exiting"); + return null; + } + assert previousManifest != null : "Last cluster metadata manifest is not set"; + ClusterMetadataManifest committedManifest = uploadManifest( + clusterState, + previousManifest.getIndices(), + previousManifest.getPreviousClusterUUID(), + previousManifest.getGlobalMetadataFileName(), + true + ); + deleteStaleClusterUUIDs(clusterState, committedManifest); + return committedManifest; + } + + @Override + public void close() throws IOException { + if (blobStoreRepository != null) { + IOUtils.close(blobStoreRepository); + } + } + + public void start() { + assert isRemoteStoreClusterStateEnabled(settings) == true : "Remote cluster state is not enabled"; + final String remoteStoreRepo = settings.get( + Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY + ); + assert remoteStoreRepo != null : "Remote Cluster State repository is not configured"; + final Repository repository = repositoriesService.get().repository(remoteStoreRepo); + assert repository instanceof BlobStoreRepository : "Repository should be instance of BlobStoreRepository"; + blobStoreRepository = (BlobStoreRepository) repository; + } + + private ClusterMetadataManifest uploadManifest( + ClusterState clusterState, + List<UploadedIndexMetadata> uploadedIndexMetadata, + String previousClusterUUID, + String globalClusterMetadataFileName, + boolean committed + ) throws IOException { + synchronized (this) { + final String manifestFileName = getManifestFileName(clusterState.term(), clusterState.version(), committed); + final ClusterMetadataManifest manifest = new ClusterMetadataManifest( + clusterState.term(), + clusterState.getVersion(), + clusterState.metadata().clusterUUID(), + clusterState.stateUUID(), + Version.CURRENT, + nodeId, + committed, + MANIFEST_CURRENT_CODEC_VERSION, + globalClusterMetadataFileName, + uploadedIndexMetadata, + previousClusterUUID, + clusterState.metadata().clusterUUIDCommitted() + ); + writeMetadataManifest(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID(), manifest, manifestFileName); + return manifest; + } + } + + private void writeMetadataManifest(String clusterName, String clusterUUID, ClusterMetadataManifest uploadManifest, String fileName) + throws IOException { + AtomicReference<String> result = new AtomicReference<>(); + AtomicReference<Exception> exceptionReference = new AtomicReference<>(); + + final BlobContainer metadataManifestContainer = manifestContainer(clusterName, clusterUUID); + + // latch to wait until the upload has finished + CountDownLatch latch = new CountDownLatch(1); + + LatchedActionListener<Void> completionListener = new LatchedActionListener<>(ActionListener.wrap(resp -> { + logger.trace("Manifest file uploaded successfully."); + }, ex -> { exceptionReference.set(ex); }), latch); + +
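+ // Sketch of the blocking-upload pattern used throughout this class (illustrative, no new behavior): + // kick off the async write below, then await the CountDownLatch under the configured timeout; + // a timeout or an interrupt surfaces as RemoteStateTransferException, and a listener failure + // recorded in exceptionReference is rethrown once the latch is released.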
CLUSTER_METADATA_MANIFEST_FORMAT.writeAsyncWithUrgentPriority( + uploadManifest, + metadataManifestContainer, + fileName, + blobStoreRepository.getCompressor(), + completionListener, + FORMAT_PARAMS + ); + + try { + if (latch.await(getMetadataManifestUploadTimeout().millis(), TimeUnit.MILLISECONDS) == false) { + RemoteStateTransferException ex = new RemoteStateTransferException( + String.format(Locale.ROOT, "Timed out waiting for transfer of manifest file to complete") + ); + throw ex; + } + } catch (InterruptedException ex) { + RemoteStateTransferException exception = new RemoteStateTransferException( + String.format(Locale.ROOT, "Interrupted while waiting for transfer of manifest file to complete"), + ex + ); + Thread.currentThread().interrupt(); + throw exception; + } + if (exceptionReference.get() != null) { + throw new RemoteStateTransferException(exceptionReference.get().getMessage(), exceptionReference.get()); + } + logger.debug( + "Metadata manifest file [{}] written during [{}] phase.", + fileName, + uploadManifest.isCommitted() ? "commit" : "publish" + ); + } + + private String fetchPreviousClusterUUID(String clusterName, String clusterUUID) { + final Optional<ClusterMetadataManifest> latestManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); + if (!latestManifest.isPresent()) { + final String previousClusterUUID = getLastKnownUUIDFromRemote(clusterName); + assert !clusterUUID.equals(previousClusterUUID) : "Last cluster UUID is same as current cluster UUID"; + return previousClusterUUID; + } + return latestManifest.get().getPreviousClusterUUID(); + } + + private BlobContainer indexMetadataContainer(String clusterName, String clusterUUID, String indexUUID) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index/ftqsCnn9TgOX + return blobStoreRepository.blobStore() + .blobContainer(getCusterMetadataBasePath(clusterName, clusterUUID).add(INDEX_PATH_TOKEN).add(indexUUID)); + } + + private BlobContainer globalMetadataContainer(String clusterName, String clusterUUID) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/ + return blobStoreRepository.blobStore() + .blobContainer(getCusterMetadataBasePath(clusterName, clusterUUID).add(GLOBAL_METADATA_PATH_TOKEN)); + } + + private BlobContainer manifestContainer(String clusterName, String clusterUUID) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest + return blobStoreRepository.blobStore().blobContainer(getManifestFolderPath(clusterName, clusterUUID)); + } + + private BlobPath getCusterMetadataBasePath(String clusterName, String clusterUUID) { + return blobStoreRepository.basePath().add(encodeString(clusterName)).add(CLUSTER_STATE_PATH_TOKEN).add(clusterUUID); + } + + private BlobContainer clusterUUIDContainer(String clusterName) { + return blobStoreRepository.blobStore() + .blobContainer( + blobStoreRepository.basePath() + .add(Base64.getUrlEncoder().withoutPadding().encodeToString(clusterName.getBytes(StandardCharsets.UTF_8))) + .add(CLUSTER_STATE_PATH_TOKEN) + ); + } + + private void setSlowWriteLoggingThreshold(TimeValue slowWriteLoggingThreshold) { + this.slowWriteLoggingThreshold = slowWriteLoggingThreshold; + } + + private void setIndexMetadataUploadTimeout(TimeValue newIndexMetadataUploadTimeout) { + this.indexMetadataUploadTimeout = newIndexMetadataUploadTimeout; + } + + private void setGlobalMetadataUploadTimeout(TimeValue newGlobalMetadataUploadTimeout) { + this.globalMetadataUploadTimeout = newGlobalMetadataUploadTimeout; + } + + private void setMetadataManifestUploadTimeout(TimeValue
newMetadataManifestUploadTimeout) { + this.metadataManifestUploadTimeout = newMetadataManifestUploadTimeout; + } + + public TimeValue getIndexMetadataUploadTimeout() { + return this.indexMetadataUploadTimeout; + } + + public TimeValue getGlobalMetadataUploadTimeout() { + return this.globalMetadataUploadTimeout; + } + + public TimeValue getMetadataManifestUploadTimeout() { + return this.metadataManifestUploadTimeout; + } + + static String getManifestFileName(long term, long version, boolean committed) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/manifest/manifest__<inverted_term>__<inverted_version>__C/P__<inverted_timestamp>__<codec_version> + return String.join( + DELIMITER, + MANIFEST_PATH_TOKEN, + RemoteStoreUtils.invertLong(term), + RemoteStoreUtils.invertLong(version), + (committed ? "C" : "P"), // C for committed and P for published + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(MANIFEST_CURRENT_CODEC_VERSION) // Keep the codec version in the last place only; during read we look at the + // last place to determine the codec version. + ); + } + + static String indexMetadataFileName(IndexMetadata indexMetadata) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/index/<index_UUID>/metadata__<inverted_index_metadata_version>__<inverted_timestamp>__<codec_version> + return String.join( + DELIMITER, + METADATA_FILE_PREFIX, + RemoteStoreUtils.invertLong(indexMetadata.getVersion()), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(INDEX_METADATA_CURRENT_CODEC_VERSION) // Keep the codec version in the last place only; during read we look at + // the last place to determine the codec version. + ); + } + + private static String globalMetadataFileName(Metadata metadata) { + // 123456789012_test-cluster/cluster-state/dsgYj10Nkso7/global-metadata/metadata__<inverted_metadata_version>__<inverted_timestamp>__<codec_version> + return String.join( + DELIMITER, + METADATA_FILE_PREFIX, + RemoteStoreUtils.invertLong(metadata.version()), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(GLOBAL_METADATA_CURRENT_CODEC_VERSION) + ); + } + + private BlobPath getManifestFolderPath(String clusterName, String clusterUUID) { + return getCusterMetadataBasePath(clusterName, clusterUUID).add(MANIFEST_PATH_TOKEN); + } + + /** + * Fetch latest index metadata from remote cluster state + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @param clusterMetadataManifest manifest file of cluster + * @return {@code Map<String, IndexMetadata>} latest IndexUUID to IndexMetadata map + */ + private Map<String, IndexMetadata> getIndexMetadataMap( + String clusterName, + String clusterUUID, + ClusterMetadataManifest clusterMetadataManifest + ) { + assert Objects.equals(clusterUUID, clusterMetadataManifest.getClusterUUID()) + : "Corrupt ClusterMetadataManifest found. 
Cluster UUID mismatch."; + Map remoteIndexMetadata = new HashMap<>(); + for (UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.getIndices()) { + IndexMetadata indexMetadata = getIndexMetadata(clusterName, clusterUUID, uploadedIndexMetadata); + remoteIndexMetadata.put(uploadedIndexMetadata.getIndexUUID(), indexMetadata); + } + return remoteIndexMetadata; + } + + /** + * Fetch index metadata from remote cluster state + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @param uploadedIndexMetadata {@link UploadedIndexMetadata} contains details about remote location of index metadata + * @return {@link IndexMetadata} + */ + private IndexMetadata getIndexMetadata(String clusterName, String clusterUUID, UploadedIndexMetadata uploadedIndexMetadata) { + BlobContainer blobContainer = indexMetadataContainer(clusterName, clusterUUID, uploadedIndexMetadata.getIndexUUID()); + try { + String[] splitPath = uploadedIndexMetadata.getUploadedFilename().split("/"); + return INDEX_METADATA_FORMAT.read( + blobContainer, + splitPath[splitPath.length - 1], + blobStoreRepository.getNamedXContentRegistry() + ); + } catch (IOException e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Error while downloading IndexMetadata - %s", uploadedIndexMetadata.getUploadedFilename()), + e + ); + } + } + + /** + * Fetch latest ClusterState from remote, including global metadata, index metadata and cluster state version + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return {@link IndexMetadata} + */ + public ClusterState getLatestClusterState(String clusterName, String clusterUUID) { + start(); + Optional clusterMetadataManifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); + if (clusterMetadataManifest.isEmpty()) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Latest cluster metadata manifest is not present for the provided clusterUUID: %s", clusterUUID) + ); + } + // Fetch Global Metadata + Metadata globalMetadata = getGlobalMetadata(clusterName, clusterUUID, clusterMetadataManifest.get()); + + // Fetch Index Metadata + Map indices = getIndexMetadataMap(clusterName, clusterUUID, clusterMetadataManifest.get()); + + Map indexMetadataMap = new HashMap<>(); + indices.values().forEach(indexMetadata -> { indexMetadataMap.put(indexMetadata.getIndex().getName(), indexMetadata); }); + + return ClusterState.builder(ClusterState.EMPTY_STATE) + .version(clusterMetadataManifest.get().getStateVersion()) + .metadata(Metadata.builder(globalMetadata).indices(indexMetadataMap).build()) + .build(); + } + + private Metadata getGlobalMetadata(String clusterName, String clusterUUID, ClusterMetadataManifest clusterMetadataManifest) { + String globalMetadataFileName = clusterMetadataManifest.getGlobalMetadataFileName(); + try { + // Fetch Global metadata + if (globalMetadataFileName != null) { + String[] splitPath = globalMetadataFileName.split("/"); + return GLOBAL_METADATA_FORMAT.read( + globalMetadataContainer(clusterName, clusterUUID), + splitPath[splitPath.length - 1], + blobStoreRepository.getNamedXContentRegistry() + ); + } else { + return Metadata.EMPTY_METADATA; + } + } catch (IOException e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Error while downloading Global Metadata - %s", globalMetadataFileName), + e + ); + } + } + + /** + * Fetch latest ClusterMetadataManifest from remote state store + * + * @param 
clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return ClusterMetadataManifest + */ + public Optional<ClusterMetadataManifest> getLatestClusterMetadataManifest(String clusterName, String clusterUUID) { + Optional<String> latestManifestFileName = getLatestManifestFileName(clusterName, clusterUUID); + return latestManifestFileName.map(s -> fetchRemoteClusterMetadataManifest(clusterName, clusterUUID, s)); + } + + /** + * Fetch the previous cluster UUIDs from remote state store and return the most recent valid cluster UUID + * + * @param clusterName The cluster name for which previous cluster UUID is to be fetched + * @return Last valid cluster UUID + */ + public String getLastKnownUUIDFromRemote(String clusterName) { + try { + Set<String> clusterUUIDs = getAllClusterUUIDs(clusterName); + Map<String, ClusterMetadataManifest> latestManifests = getLatestManifestForAllClusterUUIDs(clusterName, clusterUUIDs); + List<String> validChain = createClusterChain(latestManifests, clusterName); + if (validChain.isEmpty()) { + return ClusterState.UNKNOWN_UUID; + } + return validChain.get(0); + } catch (IOException e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Error while fetching previous UUIDs from remote store for cluster name: %s", clusterName), + e + ); + } + } + + private Set<String> getAllClusterUUIDs(String clusterName) throws IOException { + Map<String, BlobContainer> clusterUUIDMetadata = clusterUUIDContainer(clusterName).children(); + if (clusterUUIDMetadata == null) { + return Collections.emptySet(); + } + return Collections.unmodifiableSet(clusterUUIDMetadata.keySet()); + } + + private Map<String, ClusterMetadataManifest> getLatestManifestForAllClusterUUIDs(String clusterName, Set<String> clusterUUIDs) { + Map<String, ClusterMetadataManifest> manifestsByClusterUUID = new HashMap<>(); + for (String clusterUUID : clusterUUIDs) { + try { + Optional<ClusterMetadataManifest> manifest = getLatestClusterMetadataManifest(clusterName, clusterUUID); + manifest.ifPresent(clusterMetadataManifest -> manifestsByClusterUUID.put(clusterUUID, clusterMetadataManifest)); + } catch (Exception e) { + throw new IllegalStateException( + String.format(Locale.ROOT, "Exception in fetching manifest for clusterUUID: %s", clusterUUID), + e + ); + } + } + return manifestsByClusterUUID; + } + + /** + * This method creates a valid cluster UUID chain. + * + * @param manifestsByClusterUUID Map of latest ClusterMetadataManifest for every cluster UUID + * @return List of cluster UUIDs. The first element is the most recent cluster UUID in the chain + */ + private List<String> createClusterChain(final Map<String, ClusterMetadataManifest> manifestsByClusterUUID, final String clusterName) { + final List<ClusterMetadataManifest> validClusterManifests = manifestsByClusterUUID.values() + .stream() + .filter(this::isValidClusterUUID) + .collect(Collectors.toList()); + final Map<String, String> clusterUUIDGraph = validClusterManifests.stream() + .collect(Collectors.toMap(ClusterMetadataManifest::getClusterUUID, ClusterMetadataManifest::getPreviousClusterUUID)); + final List<String> topLevelClusterUUIDs = validClusterManifests.stream() + .map(ClusterMetadataManifest::getClusterUUID) + .filter(clusterUUID -> !clusterUUIDGraph.containsValue(clusterUUID)) + .collect(Collectors.toList()); + + if (topLevelClusterUUIDs.isEmpty()) { + // This can occur only when there are no valid cluster UUIDs + assert validClusterManifests.isEmpty() : "There are no top level cluster UUIDs even when there are valid cluster UUIDs"; + logger.info("There is no valid previous cluster UUID. 
All cluster UUIDs evaluated are: {}", manifestsByClusterUUID.keySet()); + return Collections.emptyList(); + } + if (topLevelClusterUUIDs.size() > 1) { + logger.info("Top level cluster UUIDs: {}", topLevelClusterUUIDs); + // If there is more than one valid top level cluster UUID, a race condition occurred where + // more than one cluster manager node tried to become the active cluster manager and published + // cluster UUIDs that follow the same previous UUID. + final Map<String, ClusterMetadataManifest> manifestsByClusterUUIDTrimmed = trimClusterUUIDs( + manifestsByClusterUUID, + topLevelClusterUUIDs, + clusterName + ); + if (manifestsByClusterUUID.size() == manifestsByClusterUUIDTrimmed.size()) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "The system has ended up with multiple valid cluster states in the remote store. " + + "Please check their latest manifest to decide which one you want to keep. Valid Cluster UUIDs: - %s", + topLevelClusterUUIDs + ) + ); + } + return createClusterChain(manifestsByClusterUUIDTrimmed, clusterName); + } + final List<String> validChain = new ArrayList<>(); + String currentUUID = topLevelClusterUUIDs.get(0); + while (currentUUID != null && !ClusterState.UNKNOWN_UUID.equals(currentUUID)) { + validChain.add(currentUUID); + // Getting the previous cluster UUID of a cluster UUID from the clusterUUID graph + currentUUID = clusterUUIDGraph.get(currentUUID); + } + logger.info("Known UUIDs found in remote store : [{}]", validChain); + return validChain; + } + + /** + * This method takes a map of manifests for different cluster UUIDs and removes the + * manifest of a cluster UUID if the latest metadata for that cluster UUID is equivalent + * to the latest metadata of its previous UUID. + * @return Trimmed map of manifests + */ + private Map<String, ClusterMetadataManifest> trimClusterUUIDs( + final Map<String, ClusterMetadataManifest> latestManifestsByClusterUUID, + final List<String> validClusterUUIDs, + final String clusterName + ) { + final Map<String, ClusterMetadataManifest> trimmedUUIDs = new HashMap<>(latestManifestsByClusterUUID); + for (String clusterUUID : validClusterUUIDs) { + ClusterMetadataManifest currentManifest = trimmedUUIDs.get(clusterUUID); + // Here we compare the manifest of the current UUID to that of the previous UUID. + // In case the current UUID's latest manifest is the same as the previous UUID's latest manifest, + // it was restored from the previous UUID and no IndexMetadata update was performed on it. 
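+ // Hypothetical example: if u2 and u3 are both top level and both point at u1, and u2's index and + // global metadata are identical to u1's (u2 was restored from u1 with no writes), u2 is removed + // here and createClusterChain() re-runs on the remaining manifests, yielding the chain [u3, u1].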
+ if (!ClusterState.UNKNOWN_UUID.equals(currentManifest.getPreviousClusterUUID())) { + ClusterMetadataManifest previousManifest = trimmedUUIDs.get(currentManifest.getPreviousClusterUUID()); + if (isMetadataEqual(currentManifest, previousManifest, clusterName) + && isGlobalMetadataEqual(currentManifest, previousManifest, clusterName)) { + trimmedUUIDs.remove(clusterUUID); + } + } + } + return trimmedUUIDs; + } + + private boolean isMetadataEqual(ClusterMetadataManifest first, ClusterMetadataManifest second, String clusterName) { + // todo clusterName can be set as final in the constructor + if (first.getIndices().size() != second.getIndices().size()) { + return false; + } + final Map<String, UploadedIndexMetadata> secondIndices = second.getIndices() + .stream() + .collect(Collectors.toMap(md -> md.getIndexName(), Function.identity())); + for (UploadedIndexMetadata uploadedIndexMetadata : first.getIndices()) { + final IndexMetadata firstIndexMetadata = getIndexMetadata(clusterName, first.getClusterUUID(), uploadedIndexMetadata); + final UploadedIndexMetadata secondUploadedIndexMetadata = secondIndices.get(uploadedIndexMetadata.getIndexName()); + if (secondUploadedIndexMetadata == null) { + return false; + } + final IndexMetadata secondIndexMetadata = getIndexMetadata(clusterName, second.getClusterUUID(), secondUploadedIndexMetadata); + if (firstIndexMetadata.equals(secondIndexMetadata) == false) { + return false; + } + } + return true; + } + + private boolean isGlobalMetadataEqual(ClusterMetadataManifest first, ClusterMetadataManifest second, String clusterName) { + Metadata secondGlobalMetadata = getGlobalMetadata(clusterName, second.getClusterUUID(), second); + Metadata firstGlobalMetadata = getGlobalMetadata(clusterName, first.getClusterUUID(), first); + return Metadata.isGlobalResourcesMetadataEquals(firstGlobalMetadata, secondGlobalMetadata); + } + + private boolean isValidClusterUUID(ClusterMetadataManifest manifest) { + return manifest.isClusterUUIDCommitted(); + } + + /** + * Fetch ClusterMetadataManifest files from remote state store in order + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @param limit max no of files to fetch + * @return all manifest file names + */ + private List<BlobMetadata> getManifestFileNames(String clusterName, String clusterUUID, int limit) throws IllegalStateException { + try { + + /* + {@link BlobContainer#listBlobsByPrefixInSortedOrder} will list the latest manifest file first + as the manifest file name generated via {@link RemoteClusterStateService#getManifestFileName} ensures + when sorted in LEXICOGRAPHIC order the latest uploaded manifest file comes on top. 
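+ For example (illustrative values): RemoteStoreUtils.invertLong maps a larger number to a + lexicographically smaller string, so the manifest name written for version 101 sorts before + the one written for version 100 and the newest manifest is listed first.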
+ */ + return manifestContainer(clusterName, clusterUUID).listBlobsByPrefixInSortedOrder( + MANIFEST_FILE_PREFIX + DELIMITER, + limit, + BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC + ); + } catch (IOException e) { + throw new IllegalStateException("Error while fetching latest manifest file for remote cluster state", e); + } + } + + /** + * Fetch latest ClusterMetadataManifest file from remote state store + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return latest ClusterMetadataManifest filename + */ + private Optional<String> getLatestManifestFileName(String clusterName, String clusterUUID) throws IllegalStateException { + List<BlobMetadata> manifestFilesMetadata = getManifestFileNames(clusterName, clusterUUID, 1); + if (manifestFilesMetadata != null && !manifestFilesMetadata.isEmpty()) { + return Optional.of(manifestFilesMetadata.get(0).name()); + } + logger.info("No manifest file present in remote store for cluster name: {}, cluster UUID: {}", clusterName, clusterUUID); + return Optional.empty(); + } + + /** + * Fetch ClusterMetadataManifest from remote state store + * + * @param clusterUUID uuid of cluster state to refer to in remote + * @param clusterName name of the cluster + * @return ClusterMetadataManifest + */ + private ClusterMetadataManifest fetchRemoteClusterMetadataManifest(String clusterName, String clusterUUID, String filename) + throws IllegalStateException { + try { + return getClusterMetadataManifestBlobStoreFormat(filename).read( + manifestContainer(clusterName, clusterUUID), + filename, + blobStoreRepository.getNamedXContentRegistry() + ); + } catch (IOException e) { + throw new IllegalStateException(String.format(Locale.ROOT, "Error while downloading cluster metadata - %s", filename), e); + } + } + + private ChecksumBlobStoreFormat<ClusterMetadataManifest> getClusterMetadataManifestBlobStoreFormat(String fileName) { + long codecVersion = getManifestCodecVersion(fileName); + if (codecVersion == MANIFEST_CURRENT_CODEC_VERSION) { + return CLUSTER_METADATA_MANIFEST_FORMAT; + } else if (codecVersion == ClusterMetadataManifest.CODEC_V0) { + return CLUSTER_METADATA_MANIFEST_FORMAT_V0; + } + + throw new IllegalArgumentException("Cluster metadata manifest file is corrupted, doesn't have a valid codec version"); + } + + private int getManifestCodecVersion(String fileName) { + String[] splitName = fileName.split(DELIMITER); + if (splitName.length == SPLITED_MANIFEST_FILE_LENGTH) { + return Integer.parseInt(splitName[splitName.length - 1]); // The last value is the codec version. + } else if (splitName.length < SPLITED_MANIFEST_FILE_LENGTH) { // The codec is not part of the file name, i.e. the default codec + // version 0 is used. + return ClusterMetadataManifest.CODEC_V0; + } else { + throw new IllegalArgumentException("Manifest file name is corrupted"); + } + } + + public static String encodeString(String content) { + return Base64.getUrlEncoder().withoutPadding().encodeToString(content.getBytes(StandardCharsets.UTF_8)); + } + + public void writeMetadataFailed() { + getStats().stateFailed(); + } + + /** + * Exception for Remote state transfer. 
+ */ + static class RemoteStateTransferException extends RuntimeException { + + public RemoteStateTransferException(String errorDesc) { + super(errorDesc); + } + + public RemoteStateTransferException(String errorDesc, Throwable cause) { + super(errorDesc, cause); + } + } + + /** + * Purges all remote cluster state against provided cluster UUIDs + * + * @param clusterName name of the cluster + * @param clusterUUIDs clusterUUIDs for which the remote state needs to be purged + */ + void deleteStaleUUIDsClusterMetadata(String clusterName, List<String> clusterUUIDs) { + clusterUUIDs.forEach(clusterUUID -> { + getBlobStoreTransferService().deleteAsync( + ThreadPool.Names.REMOTE_PURGE, + getCusterMetadataBasePath(clusterName, clusterUUID), + new ActionListener<>() { + @Override + public void onResponse(Void unused) { + logger.info("Deleted all remote cluster metadata for cluster UUID - {}", clusterUUID); + } + + @Override + public void onFailure(Exception e) { + logger.error( + new ParameterizedMessage( + "Exception occurred while deleting all remote cluster metadata for cluster UUID {}", + clusterUUID + ), + e + ); + remoteStateStats.cleanUpAttemptFailed(); + } + } + ); + }); + } + + /** + * Deletes manifests older than the last {@code manifestsToRetain} manifests. Also cleans up unreferenced IndexMetadata associated with older manifests + * + * @param clusterName name of the cluster + * @param clusterUUID uuid of cluster state to refer to in remote + * @param manifestsToRetain no of latest manifest files to keep in remote + */ + // package private for testing + void deleteStaleClusterMetadata(String clusterName, String clusterUUID, int manifestsToRetain) { + if (deleteStaleMetadataRunning.compareAndSet(false, true) == false) { + logger.info("Delete stale cluster metadata task is already in progress."); + return; + } + try { + getBlobStoreTransferService().listAllInSortedOrderAsync( + ThreadPool.Names.REMOTE_PURGE, + getManifestFolderPath(clusterName, clusterUUID), + "manifest", + Integer.MAX_VALUE, + new ActionListener<>() { + @Override + public void onResponse(List<BlobMetadata> blobMetadata) { + if (blobMetadata.size() > manifestsToRetain) { + deleteClusterMetadata( + clusterName, + clusterUUID, + blobMetadata.subList(0, manifestsToRetain - 1), + blobMetadata.subList(manifestsToRetain - 1, blobMetadata.size()) + ); + } + deleteStaleMetadataRunning.set(false); + } + + @Override + public void onFailure(Exception e) { + logger.error( + new ParameterizedMessage( + "Exception occurred while deleting Remote Cluster Metadata for clusterUUID {}", + clusterUUID + ), + e + ); + deleteStaleMetadataRunning.set(false); + } + } + ); + } catch (Exception e) { + deleteStaleMetadataRunning.set(false); + throw e; + } + } + + private void deleteClusterMetadata( + String clusterName, + String clusterUUID, + List<BlobMetadata> activeManifestBlobMetadata, + List<BlobMetadata> staleManifestBlobMetadata + ) { + try { + Set<String> filesToKeep = new HashSet<>(); + Set<String> staleManifestPaths = new HashSet<>(); + Set<String> staleIndexMetadataPaths = new HashSet<>(); + Set<String> staleGlobalMetadataPaths = new HashSet<>(); + activeManifestBlobMetadata.forEach(blobMetadata -> { + ClusterMetadataManifest clusterMetadataManifest = fetchRemoteClusterMetadataManifest( + clusterName, + clusterUUID, + blobMetadata.name() + ); + clusterMetadataManifest.getIndices() + .forEach(uploadedIndexMetadata -> filesToKeep.add(uploadedIndexMetadata.getUploadedFilename())); + filesToKeep.add(clusterMetadataManifest.getGlobalMetadataFileName()); + }); + staleManifestBlobMetadata.forEach(blobMetadata -> { + ClusterMetadataManifest 
clusterMetadataManifest = fetchRemoteClusterMetadataManifest( + clusterName, + clusterUUID, + blobMetadata.name() + ); + staleManifestPaths.add(new BlobPath().add(MANIFEST_PATH_TOKEN).buildAsString() + blobMetadata.name()); + if (filesToKeep.contains(clusterMetadataManifest.getGlobalMetadataFileName()) == false) { + String[] globalMetadataSplitPath = clusterMetadataManifest.getGlobalMetadataFileName().split("/"); + staleGlobalMetadataPaths.add( + new BlobPath().add(GLOBAL_METADATA_PATH_TOKEN).buildAsString() + GLOBAL_METADATA_FORMAT.blobName( + globalMetadataSplitPath[globalMetadataSplitPath.length - 1] + ) + ); + } + clusterMetadataManifest.getIndices().forEach(uploadedIndexMetadata -> { + if (filesToKeep.contains(uploadedIndexMetadata.getUploadedFilename()) == false) { + staleIndexMetadataPaths.add( + new BlobPath().add(INDEX_PATH_TOKEN).add(uploadedIndexMetadata.getIndexUUID()).buildAsString() + + INDEX_METADATA_FORMAT.blobName(uploadedIndexMetadata.getUploadedFilename()) + ); + } + }); + }); + + if (staleManifestPaths.isEmpty()) { + logger.debug("No stale Remote Cluster Metadata files found"); + return; + } + + deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleGlobalMetadataPaths)); + deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleIndexMetadataPaths)); + deleteStalePaths(clusterName, clusterUUID, new ArrayList<>(staleManifestPaths)); + } catch (IllegalStateException e) { + logger.error("Error while fetching Remote Cluster Metadata manifests", e); + } catch (IOException e) { + logger.error("Error while deleting stale Remote Cluster Metadata files", e); + remoteStateStats.cleanUpAttemptFailed(); + } catch (Exception e) { + logger.error("Unexpected error while deleting stale Remote Cluster Metadata files", e); + remoteStateStats.cleanUpAttemptFailed(); + } + } + + private void deleteStalePaths(String clusterName, String clusterUUID, List<String> stalePaths) throws IOException { + logger.debug(String.format(Locale.ROOT, "Deleting stale files from remote - %s", stalePaths)); + getBlobStoreTransferService().deleteBlobs(getCusterMetadataBasePath(clusterName, clusterUUID), stalePaths); + } + + /** + * Purges stale remote cluster state for all cluster UUIDs other than the committed and previous ones + * @param clusterState current state of the cluster + * @param committedManifest last committed ClusterMetadataManifest + */ + public void deleteStaleClusterUUIDs(ClusterState clusterState, ClusterMetadataManifest committedManifest) { + threadpool.executor(ThreadPool.Names.REMOTE_PURGE).execute(() -> { + String clusterName = clusterState.getClusterName().value(); + logger.debug("Deleting stale cluster UUIDs data from remote [{}]", clusterName); + Set<String> allClustersUUIDsInRemote; + try { + allClustersUUIDsInRemote = new HashSet<>(getAllClusterUUIDs(clusterState.getClusterName().value())); + } catch (IOException e) { + logger.info(String.format(Locale.ROOT, "Error while fetching all cluster UUIDs for [%s]", clusterName)); + return; + } + // Retain the data of the last 2 cluster UUIDs + allClustersUUIDsInRemote.remove(committedManifest.getClusterUUID()); + allClustersUUIDsInRemote.remove(committedManifest.getPreviousClusterUUID()); + deleteStaleUUIDsClusterMetadata(clusterName, new ArrayList<>(allClustersUUIDsInRemote)); + }); + } + + public RemotePersistenceStats getStats() { + return remoteStateStats; + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java new file mode 100644 index 
0000000000000..f2330846fa23e --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/RemotePersistenceStats.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway.remote; + +import org.opensearch.cluster.coordination.PersistedStateStats; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Remote state related extended stats. + * + * @opensearch.internal + */ +public class RemotePersistenceStats extends PersistedStateStats { + static final String CLEANUP_ATTEMPT_FAILED_COUNT = "cleanup_attempt_failed_count"; + static final String REMOTE_UPLOAD = "remote_upload"; + private AtomicLong cleanupAttemptFailedCount = new AtomicLong(0); + + public RemotePersistenceStats() { + super(REMOTE_UPLOAD); + addToExtendedFields(CLEANUP_ATTEMPT_FAILED_COUNT, cleanupAttemptFailedCount); + } + + public void cleanUpAttemptFailed() { + cleanupAttemptFailedCount.incrementAndGet(); + } + + public long getCleanupAttemptFailedCount() { + return cleanupAttemptFailedCount.get(); + } +} diff --git a/server/src/main/java/org/opensearch/gateway/remote/package-info.java b/server/src/main/java/org/opensearch/gateway/remote/package-info.java new file mode 100644 index 0000000000000..286e739f66289 --- /dev/null +++ b/server/src/main/java/org/opensearch/gateway/remote/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Package containing class to perform operations on remote cluster state + */ +package org.opensearch.gateway.remote; diff --git a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java index ce02dfb21c587..b8f8abb6c2c23 100644 --- a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java @@ -54,6 +54,12 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.channels.TraceableHttpChannel; +import org.opensearch.telemetry.tracing.channels.TraceableRestChannel; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.BindTransportException; @@ -105,7 +111,8 @@ public abstract class AbstractHttpServerTransport extends AbstractLifecycleCompo private final Set httpChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); private final Set httpServerChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); - private final HttpTracer tracer; + private final HttpTracer httpTracer; + private final Tracer tracer; protected AbstractHttpServerTransport( Settings settings, @@ -114,7 +121,8 @@ protected AbstractHttpServerTransport( ThreadPool threadPool, NamedXContentRegistry xContentRegistry, Dispatcher dispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer telemetryTracer ) { this.settings = settings; 
this.networkService = networkService; @@ -138,7 +146,8 @@ protected AbstractHttpServerTransport( this.port = SETTING_HTTP_PORT.get(settings); this.maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings); - this.tracer = new HttpTracer(settings, clusterSettings); + this.httpTracer = new HttpTracer(settings, clusterSettings); + this.tracer = telemetryTracer; } @Override @@ -289,6 +298,7 @@ static int resolvePublishPort(Settings settings, List boundAdd } public void onException(HttpChannel channel, Exception e) { + channel.handleException(e); if (lifecycle.started() == false) { // just close and ignore - we are already stopped and just need to make sure we release all resources CloseableChannel.closeChannel(channel); @@ -352,19 +362,31 @@ protected void serverAcceptedChannel(HttpChannel httpChannel) { * @param httpChannel that received the http request */ public void incomingRequest(final HttpRequest httpRequest, final HttpChannel httpChannel) { - handleIncomingRequest(httpRequest, httpChannel, httpRequest.getInboundException()); + final Span span = tracer.startSpan(SpanBuilder.from(httpRequest), httpRequest.getHeaders()); + try (final SpanScope httpRequestSpanScope = tracer.withSpanInScope(span)) { + HttpChannel traceableHttpChannel = TraceableHttpChannel.create(httpChannel, span, tracer); + handleIncomingRequest(httpRequest, traceableHttpChannel, httpRequest.getInboundException()); + } } // Visible for testing void dispatchRequest(final RestRequest restRequest, final RestChannel channel, final Throwable badRequestCause) { + RestChannel traceableRestChannel = channel; final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - if (badRequestCause != null) { - dispatcher.dispatchBadRequest(channel, threadContext, badRequestCause); - } else { - dispatcher.dispatchRequest(restRequest, channel, threadContext); + final Span span = tracer.startSpan(SpanBuilder.from(restRequest)); + try (final SpanScope spanScope = tracer.withSpanInScope(span)) { + if (channel != null) { + traceableRestChannel = TraceableRestChannel.create(channel, span, tracer); + } + if (badRequestCause != null) { + dispatcher.dispatchBadRequest(traceableRestChannel, threadContext, badRequestCause); + } else { + dispatcher.dispatchRequest(restRequest, traceableRestChannel, threadContext); + } } } + } private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChannel httpChannel, final Exception exception) { @@ -401,7 +423,7 @@ private void handleIncomingRequest(final HttpRequest httpRequest, final HttpChan restRequest = innerRestRequest; } - final HttpTracer trace = tracer.maybeTraceRequest(restRequest, exception); + final HttpTracer trace = httpTracer.maybeTraceRequest(restRequest, exception); /* * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid diff --git a/server/src/main/java/org/opensearch/http/CorsHandler.java b/server/src/main/java/org/opensearch/http/CorsHandler.java index 862c50ae6ac1f..464ced184d10e 100644 --- a/server/src/main/java/org/opensearch/http/CorsHandler.java +++ b/server/src/main/java/org/opensearch/http/CorsHandler.java @@ -81,7 +81,7 @@ * This file is forked from the https://netty.io project. In particular it combines the following three * files: io.netty.handler.codec.http.cors.CorsHandler, io.netty.handler.codec.http.cors.CorsConfig, and * io.netty.handler.codec.http.cors.CorsConfigBuilder. - * + *
<p>
          * It modifies the original netty code to operate on OpenSearch http request/response abstractions. * Additionally, it removes CORS features that are not used by OpenSearch. * diff --git a/server/src/main/java/org/opensearch/http/HttpChannel.java b/server/src/main/java/org/opensearch/http/HttpChannel.java index 99aaed23c69b8..679a5d73c7837 100644 --- a/server/src/main/java/org/opensearch/http/HttpChannel.java +++ b/server/src/main/java/org/opensearch/http/HttpChannel.java @@ -36,6 +36,7 @@ import org.opensearch.core.action.ActionListener; import java.net.InetSocketAddress; +import java.util.Optional; /** * Represents an HTTP comms channel @@ -43,6 +44,11 @@ * @opensearch.internal */ public interface HttpChannel extends CloseableChannel { + /** + * Notify HTTP channel that exception happens and the response may not be sent (for example, timeout) + * @param ex the exception being raised + */ + default void handleException(Exception ex) {} /** * Sends an http response to the channel. The listener will be executed once the send process has been @@ -67,4 +73,17 @@ public interface HttpChannel extends CloseableChannel { */ InetSocketAddress getRemoteAddress(); + /** + * Returns the contextual property associated with this specific HTTP channel (the + * implementation of how such properties are managed depends on the the particular + * transport engine). + * + * @param name the name of the property + * @param clazz the expected type of the property + * + * @return the value of the property + */ + default Optional get(String name, Class clazz) { + return Optional.empty(); + } } diff --git a/server/src/main/java/org/opensearch/identity/noop/NoopIdentityPlugin.java b/server/src/main/java/org/opensearch/identity/noop/NoopIdentityPlugin.java index c2367383df595..090b1f1d025e0 100644 --- a/server/src/main/java/org/opensearch/identity/noop/NoopIdentityPlugin.java +++ b/server/src/main/java/org/opensearch/identity/noop/NoopIdentityPlugin.java @@ -14,7 +14,7 @@ /** * Implementation of identity plugin that does not enforce authentication or authorization - * + *
<p>
          * This class and related classes in this package will not return nulls or fail access checks * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/identity/noop/NoopSubject.java b/server/src/main/java/org/opensearch/identity/noop/NoopSubject.java index 424a10204aa19..964a218db3cf5 100644 --- a/server/src/main/java/org/opensearch/identity/noop/NoopSubject.java +++ b/server/src/main/java/org/opensearch/identity/noop/NoopSubject.java @@ -17,7 +17,7 @@ /** * Implementation of subject that is always authenticated - * + *
<p>
          * This class and related classes in this package will not return nulls or fail permissions checks * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java b/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java index 193966001f44c..4bd3ebdded588 100644 --- a/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java +++ b/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java @@ -26,7 +26,7 @@ public class RestTokenExtractor { /** * Given a rest request it will extract authentication token - * + *
<p>
          * If no token was found, returns null. */ public static AuthToken extractToken(final RestRequest request) { diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index de4b36ddbe39b..e29283724ebf8 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -48,10 +48,12 @@ import org.opensearch.common.CheckedFunction; import org.opensearch.common.SetOnce; import org.opensearch.common.TriFunction; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; @@ -79,6 +81,7 @@ import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.repositories.RepositoriesService; @@ -118,8 +121,9 @@ * {@link #addSettingsUpdateConsumer(Setting, Consumer)} * * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class IndexModule { public static final Setting NODE_STORE_ALLOW_MMAP = Setting.boolSetting("node.store.allow_mmap", true, Property.NodeScope); @@ -222,7 +226,6 @@ public Iterator> settings() { "tvd", "liv", "dii", - "vec", "vem" ), Function.identity(), @@ -598,7 +601,10 @@ public IndexService newIndexService( BooleanSupplier idFieldDataEnabled, ValuesSourceRegistry valuesSourceRegistry, IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, - BiFunction translogFactorySupplier + BiFunction translogFactorySupplier, + Supplier clusterDefaultRefreshIntervalSupplier, + Supplier clusterRemoteTranslogBufferIntervalSupplier, + RecoverySettings recoverySettings ) throws IOException { final IndexEventListener eventListener = freeze(); Function> readerWrapperFactory = indexReaderWrapper @@ -654,7 +660,10 @@ public IndexService newIndexService( expressionResolver, valuesSourceRegistry, recoveryStateFactory, - translogFactorySupplier + translogFactorySupplier, + clusterDefaultRefreshIntervalSupplier, + clusterRemoteTranslogBufferIntervalSupplier, + recoverySettings ); success = true; return indexService; diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 4d2ee3ca37487..84e8e2f41aaf1 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -78,7 +78,7 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.SearchIndexNameMatcher; -import org.opensearch.index.remote.RemoteStorePressureService; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; @@ -95,6 +95,7 @@ import 
org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.plugins.IndexStorePlugin; @@ -176,6 +177,9 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final Supplier indexSortSupplier; private final ValuesSourceRegistry valuesSourceRegistry; private final BiFunction translogFactorySupplier; + private final Supplier clusterDefaultRefreshIntervalSupplier; + private final Supplier clusterRemoteTranslogBufferIntervalSupplier; + private final RecoverySettings recoverySettings; public IndexService( IndexSettings indexSettings, @@ -208,7 +212,10 @@ public IndexService( IndexNameExpressionResolver expressionResolver, ValuesSourceRegistry valuesSourceRegistry, IndexStorePlugin.RecoveryStateFactory recoveryStateFactory, - BiFunction translogFactorySupplier + BiFunction translogFactorySupplier, + Supplier clusterDefaultRefreshIntervalSupplier, + Supplier clusterRemoteTranslogBufferIntervalSupplier, + RecoverySettings recoverySettings ) { super(indexSettings); this.allowExpensiveQueries = allowExpensiveQueries; @@ -236,8 +243,10 @@ public IndexService( if (indexSettings.getIndexSortConfig().hasIndexSort()) { // we delay the actual creation of the sort order for this index because the mapping has not been merged yet. // The sort order is validated right after the merge of the mapping later in the process. + boolean shouldWidenIndexSortType = this.indexSettings.shouldWidenIndexSortType(); this.indexSortSupplier = () -> indexSettings.getIndexSortConfig() .buildIndexSort( + shouldWidenIndexSortType, mapperService::fieldType, (fieldType, searchLookup) -> indexFieldData.getForField(fieldType, indexFieldData.index().getName(), searchLookup) ); @@ -275,12 +284,15 @@ public IndexService( this.readerWrapper = wrapperFactory.apply(this); this.searchOperationListeners = Collections.unmodifiableList(searchOperationListeners); this.indexingOperationListeners = Collections.unmodifiableList(indexingOperationListeners); + this.clusterDefaultRefreshIntervalSupplier = clusterDefaultRefreshIntervalSupplier; // kick off async ops for the first shard in this index this.refreshTask = new AsyncRefreshTask(this); this.trimTranslogTask = new AsyncTrimTranslogTask(this); this.globalCheckpointTask = new AsyncGlobalCheckpointTask(this); this.retentionLeaseSyncTask = new AsyncRetentionLeaseSyncTask(this); this.translogFactorySupplier = translogFactorySupplier; + this.clusterRemoteTranslogBufferIntervalSupplier = clusterRemoteTranslogBufferIntervalSupplier; + this.recoverySettings = recoverySettings; updateFsyncTaskIfNecessary(); } @@ -440,7 +452,7 @@ public synchronized IndexShard createShard( final Consumer globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, final SegmentReplicationCheckpointPublisher checkpointPublisher, - final RemoteStorePressureService remoteStorePressureService + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); /* @@ -473,7 +485,7 @@ public synchronized IndexShard createShard( Store remoteStore = null; if (this.indexSettings.isRemoteStoreEnabled()) { Directory remoteDirectory = 
remoteDirectoryFactory.newDirectory(this.indexSettings, path); - remoteStore = new Store(shardId, this.indexSettings, remoteDirectory, lock, Store.OnClose.EMPTY); + remoteStore = new Store(shardId, this.indexSettings, remoteDirectory, lock, Store.OnClose.EMPTY, path); } Directory directory = directoryFactory.newDirectory(this.indexSettings, path); @@ -482,7 +494,8 @@ public synchronized IndexShard createShard( this.indexSettings, directory, lock, - new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId)) + new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId)), + path ); eventListener.onStoreCreated(shardId); indexShard = new IndexShard( @@ -509,7 +522,10 @@ public synchronized IndexShard createShard( translogFactorySupplier, this.indexSettings.isSegRepEnabled() ? checkpointPublisher : null, remoteStore, - remoteStorePressureService + remoteStoreStatsTrackerFactory, + clusterRemoteTranslogBufferIntervalSupplier, + nodeEnv.nodeId(), + recoverySettings ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); @@ -680,7 +696,7 @@ public IndexSettings getIndexSettings() { /** * Creates a new QueryShardContext. - * + *
<p>
          * Passing a {@code null} {@link IndexSearcher} will return a valid context, however it won't be able to make * {@link IndexReader}-specific optimizations, such as rewriting containing range queries. */ @@ -690,7 +706,7 @@ public QueryShardContext newQueryShardContext(int shardId, IndexSearcher searche /** * Creates a new QueryShardContext. - * + *
<p>
          * Passing a {@code null} {@link IndexSearcher} will return a valid context, however it won't be able to make * {@link IndexReader}-specific optimizations, such as rewriting containing range queries. */ @@ -895,36 +911,47 @@ public synchronized void updateMetadata(final IndexMetadata currentIndexMetadata ); } } - if (refreshTask.getInterval().equals(indexSettings.getRefreshInterval()) == false) { - // once we change the refresh interval we schedule yet another refresh - // to ensure we are in a clean and predictable state. - // it doesn't matter if we move from or to -1 in both cases we want - // docs to become visible immediately. This also flushes all pending indexing / search requests - // that are waiting for a refresh. - threadPool.executor(ThreadPool.Names.REFRESH).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - logger.warn("forced refresh failed after interval change", e); - } - - @Override - protected void doRun() throws Exception { - maybeRefreshEngine(true); - } - - @Override - public boolean isForceExecution() { - return true; - } - }); - rescheduleRefreshTasks(); - } + onRefreshIntervalChange(); updateFsyncTaskIfNecessary(); } metadataListeners.forEach(c -> c.accept(newIndexMetadata)); } + /** + * Called whenever the refresh interval changes. This can happen in 2 cases - + * 1. {@code cluster.default.index.refresh_interval} cluster setting changes. The change would only happen for + * indexes relying on cluster default. + * 2. {@code index.refresh_interval} index setting changes. + */ + public void onRefreshIntervalChange() { + if (refreshTask.getInterval().equals(getRefreshInterval())) { + return; + } + // once we change the refresh interval we schedule yet another refresh + // to ensure we are in a clean and predictable state. + // it doesn't matter if we move from or to -1 in both cases we want + // docs to become visible immediately. This also flushes all pending indexing / search requests + // that are waiting for a refresh. + threadPool.executor(ThreadPool.Names.REFRESH).execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + logger.warn("forced refresh failed after interval change", e); + } + + @Override + protected void doRun() throws Exception { + maybeRefreshEngine(true); + } + + @Override + public boolean isForceExecution() { + return true; + } + }); + rescheduleRefreshTasks(); + } + private void updateFsyncTaskIfNecessary() { if (indexSettings.getTranslogDurability() == Translog.Durability.REQUEST) { try { @@ -989,7 +1016,7 @@ private void maybeFSyncTranslogs() { } private void maybeRefreshEngine(boolean force) { - if (indexSettings.getRefreshInterval().millis() > 0 || force) { + if (getRefreshInterval().millis() > 0 || force) { for (IndexShard shard : this.shards.values()) { try { shard.scheduledRefresh(); @@ -1060,6 +1087,17 @@ private void sync(final Consumer sync, final String source) { } } + /** + * Gets the refresh interval seen by the index service. Index setting overrides takes the highest precedence. + * @return the refresh interval. 
+ */ + private TimeValue getRefreshInterval() { + if (getIndexSettings().isExplicitRefresh()) { + return getIndexSettings().getRefreshInterval(); + } + return clusterDefaultRefreshIntervalSupplier.get(); + } + /** * Base asynchronous task * @@ -1120,7 +1158,7 @@ public String toString() { final class AsyncRefreshTask extends BaseAsyncTask { AsyncRefreshTask(IndexService indexService) { - super(indexService, indexService.getIndexSettings().getRefreshInterval()); + super(indexService, indexService.getRefreshInterval()); } @Override @@ -1242,11 +1280,16 @@ AsyncRefreshTask getRefreshTask() { // for tests return refreshTask; } + // Visible for test + public TimeValue getRefreshTaskInterval() { + return refreshTask.getInterval(); + } + AsyncTranslogFSync getFsyncTask() { // for tests return fsyncTask; } - AsyncTrimTranslogTask getTrimTranslogTask() { // for tests + public AsyncTrimTranslogTask getTrimTranslogTask() { // for tests return trimTranslogTask; } diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 2f70bbab85c09..ab478602e25c4 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -37,6 +37,7 @@ import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.Loggers; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; @@ -54,6 +55,7 @@ import org.opensearch.node.Node; import org.opensearch.search.pipeline.SearchPipelineService; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -63,6 +65,7 @@ import java.util.function.Function; import java.util.function.UnaryOperator; +import static org.opensearch.Version.V_2_7_0; import static org.opensearch.common.util.FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING; @@ -78,12 +81,46 @@ * a settings consumer at index creation via {@link IndexModule#addSettingsUpdateConsumer(Setting, Consumer)} that will * be called for each settings update. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class IndexSettings { - private static final String MERGE_ON_FLUSH_DEFAULT_POLICY = "default"; + private static final String DEFAULT_POLICY = "default"; private static final String MERGE_ON_FLUSH_MERGE_POLICY = "merge-on-flush"; + /** + * Enum representing supported merge policies + */ + public enum IndexMergePolicy { + TIERED("tiered"), + LOG_BYTE_SIZE("log_byte_size"), + DEFAULT_POLICY(IndexSettings.DEFAULT_POLICY); + + private final String value; + + IndexMergePolicy(String value) { + this.value = value; + } + + public String getValue() { + return value; + } + + public static IndexMergePolicy fromString(String text) { + for (IndexMergePolicy policy : IndexMergePolicy.values()) { + if (policy.value.equals(text)) { + return policy; + } + } + throw new IllegalArgumentException( + "The setting has unsupported policy specified: " + + text + + ". 
+ Please use one of: " + String.join(", ", Arrays.stream(IndexMergePolicy.values()).map(IndexMergePolicy::getValue).toArray(String[]::new)) + ); + } + } + public static final Setting<List<String>> DEFAULT_FIELD_SETTING = Setting.listSetting( "index.query.default_field", Collections.singletonList("*"), @@ -300,10 +337,11 @@ public final class IndexSettings { Property.Deprecated ); public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS); + public static final TimeValue MINIMUM_REFRESH_INTERVAL = new TimeValue(-1, TimeUnit.MILLISECONDS); public static final Setting<TimeValue> INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting( "index.refresh_interval", DEFAULT_REFRESH_INTERVAL, - new TimeValue(-1, TimeUnit.MILLISECONDS), + MINIMUM_REFRESH_INTERVAL, Property.Dynamic, Property.IndexScope ); @@ -511,13 +549,25 @@ public final class IndexSettings { Property.Dynamic ); + /** + * This setting controls if unreferenced files will be cleaned up in case segment merge fails due to disk full. + *
<p>
+ Defaults to true which means unreferenced files will be cleaned up in case segment merge fails. + */ + public static final Setting<Boolean> INDEX_UNREFERENCED_FILE_CLEANUP = Setting.boolSetting( + "index.unreferenced_file_cleanup.enabled", + true, + Property.IndexScope, + Property.Dynamic + ); + /** * Determines a balance between file-based and operations-based peer recoveries. The number of operations that will be used in an * operations-based peer recovery is limited to this proportion of the total number of documents in the shard (including deleted * documents) on the grounds that a file-based peer recovery may copy all of the documents in the shard over to the new peer, but is * significantly faster than replaying the missing operations on the peer, so once a peer falls far enough behind the primary it makes * more sense to copy all the data over again instead of replaying history. - * + *
<p>
          * Defaults to retaining history for up to 10% of the documents in the shard. This can only be changed in tests, since this setting is * intentionally unregistered. */ @@ -551,11 +601,25 @@ public final class IndexSettings { public static final Setting INDEX_MERGE_ON_FLUSH_POLICY = Setting.simpleString( "index.merge_on_flush.policy", - MERGE_ON_FLUSH_DEFAULT_POLICY, + DEFAULT_POLICY, Property.IndexScope, Property.Dynamic ); + public static final Setting INDEX_MERGE_POLICY = Setting.simpleString( + "index.merge.policy", + DEFAULT_POLICY, + IndexMergePolicy::fromString, + Property.IndexScope + ); + + public static final Setting TIME_SERIES_INDEX_MERGE_POLICY = Setting.simpleString( + "indices.time_series_index.default_index_merge_policy", + DEFAULT_POLICY, + IndexMergePolicy::fromString, + Property.NodeScope + ); + public static final Setting SEARCHABLE_SNAPSHOT_REPOSITORY = Setting.simpleString( "index.searchable_snapshot.repository", Property.IndexScope, @@ -605,6 +669,14 @@ public final class IndexSettings { Property.IndexScope ); + public static final Setting INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING = Setting.intSetting( + "index.remote_store.translog.keep_extra_gen", + 100, + 0, + Property.Dynamic, + Property.IndexScope + ); + private final Index index; private final Version version; private final Logger logger; @@ -617,6 +689,7 @@ public final class IndexSettings { private final String remoteStoreTranslogRepository; private final String remoteStoreRepository; private final boolean isRemoteSnapshot; + private int remoteTranslogKeepExtraGen; private Version extendedCompatibilitySnapshotVersion; // volatile fields are updated via #updateIndexMetadata(IndexMetadata) under lock private volatile Settings settings; @@ -635,7 +708,8 @@ public final class IndexSettings { private volatile ByteSizeValue generationThresholdSize; private volatile ByteSizeValue flushAfterMergeThresholdSize; private final MergeSchedulerConfig mergeSchedulerConfig; - private final MergePolicyConfig mergePolicyConfig; + private final TieredMergePolicyProvider tieredMergePolicyProvider; + private final LogByteSizeMergePolicyProvider logByteSizeMergePolicyProvider; private final IndexSortConfig indexSortConfig; private final IndexScopedSettings scopedSettings; private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis(); @@ -645,6 +719,7 @@ public final class IndexSettings { private volatile long retentionLeaseMillis; private volatile String defaultSearchPipeline; + private final boolean widenIndexSortType; /** * The maximum age of a retention lease before it is considered expired. 
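The hunks above register several new index-level knobs. A minimal sketch of how they might be supplied when building index settings follows; the setting keys are taken from this diff, while the standalone class and main() harness are illustrative assumptions, not part of the change:

    import org.opensearch.common.settings.Settings;

    public class NewIndexSettingsSketch {
        public static void main(String[] args) {
            // Keys come from the settings registered in the hunks above.
            Settings indexSettings = Settings.builder()
                .put("index.merge.policy", "log_byte_size")             // INDEX_MERGE_POLICY
                .put("index.unreferenced_file_cleanup.enabled", true)   // INDEX_UNREFERENCED_FILE_CLEANUP
                .put("index.remote_store.translog.keep_extra_gen", 100) // INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING
                .build();
            System.out.println(indexSettings);
        }
    }

An unsupported index.merge.policy value would be rejected up front by IndexMergePolicy.fromString with the IllegalArgumentException constructed above.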
@@ -675,6 +750,7 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { private volatile String defaultPipeline; private volatile String requiredPipeline; private volatile boolean searchThrottled; + private volatile boolean shouldCleanupUnreferencedFiles; private volatile long mappingNestedFieldsLimit; private volatile long mappingNestedDocsLimit; private volatile long mappingTotalFieldsLimit; @@ -783,6 +859,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti remoteStoreTranslogRepository = settings.get(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY); remoteTranslogUploadBufferInterval = INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); remoteStoreRepository = settings.get(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY); + this.remoteTranslogKeepExtraGen = INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.get(settings); isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings); if (isRemoteSnapshot && FeatureFlags.isEnabled(SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY)) { @@ -791,6 +868,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti extendedCompatibilitySnapshotVersion = Version.CURRENT.minimumIndexCompatibilityVersion(); } this.searchThrottled = INDEX_SEARCH_THROTTLED.get(settings); + this.shouldCleanupUnreferencedFiles = INDEX_UNREFERENCED_FILE_CLEANUP.get(settings); this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings); this.queryStringAnalyzeWildcard = QUERY_STRING_ANALYZE_WILDCARD.get(nodeSettings); this.queryStringAllowLeadingWildcard = QUERY_STRING_ALLOW_LEADING_WILDCARD.get(nodeSettings); @@ -824,7 +902,8 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti maxAnalyzedOffset = scopedSettings.get(MAX_ANALYZED_OFFSET_SETTING); maxTermsCount = scopedSettings.get(MAX_TERMS_COUNT_SETTING); maxRegexLength = scopedSettings.get(MAX_REGEX_LENGTH_SETTING); - this.mergePolicyConfig = new MergePolicyConfig(logger, this); + this.tieredMergePolicyProvider = new TieredMergePolicyProvider(logger, this); + this.logByteSizeMergePolicyProvider = new LogByteSizeMergePolicyProvider(logger, this); this.indexSortConfig = new IndexSortConfig(this); searchIdleAfter = scopedSettings.get(INDEX_SEARCH_IDLE_AFTER); defaultPipeline = scopedSettings.get(DEFAULT_PIPELINE); @@ -839,33 +918,66 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti mergeOnFlushEnabled = scopedSettings.get(INDEX_MERGE_ON_FLUSH_ENABLED); setMergeOnFlushPolicy(scopedSettings.get(INDEX_MERGE_ON_FLUSH_POLICY)); defaultSearchPipeline = scopedSettings.get(DEFAULT_SEARCH_PIPELINE); - - scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); + /* An unintentional breaking change was introduced with [OpenSearch-6424](https://github.com/opensearch-project/OpenSearch/pull/6424) (version 2.7). + * Indices created prior to 2.7 that use an index sort used to cast the SortField.Type to a wider + * type, such as integer to long. From OpenSearch 2.7 onwards the SortField is no longer widened, + * in order to gain some sort query optimizations. + * Since this sortField (IndexSort) is stored in SegmentInfo, we need to maintain backward compatibility for older indices. 
+ */ + widenIndexSortType = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).before(V_2_7_0); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, - mergePolicyConfig::setDeletesPctAllowed + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING, + tieredMergePolicyProvider::setNoCFSRatio ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, - mergePolicyConfig::setExpungeDeletesAllowed + TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, + tieredMergePolicyProvider::setDeletesPctAllowed ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, - mergePolicyConfig::setFloorSegmentSetting + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, + tieredMergePolicyProvider::setExpungeDeletesAllowed ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, - mergePolicyConfig::setMaxMergesAtOnce + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, + tieredMergePolicyProvider::setFloorSegmentSetting ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, - mergePolicyConfig::setMaxMergedSegment + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, + tieredMergePolicyProvider::setMaxMergesAtOnce ); scopedSettings.addSettingsUpdateConsumer( - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, - mergePolicyConfig::setSegmentsPerTier + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, + tieredMergePolicyProvider::setMaxMergedSegment + ); + scopedSettings.addSettingsUpdateConsumer( + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, + tieredMergePolicyProvider::setSegmentsPerTier ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING, + logByteSizeMergePolicyProvider::setLBSMergeFactor + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING, + logByteSizeMergePolicyProvider::setLBSMinMergedMB + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeSegment + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeMBForForcedMerge + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING, + logByteSizeMergePolicyProvider::setLBSMaxMergeDocs + ); + scopedSettings.addSettingsUpdateConsumer( + LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING, + logByteSizeMergePolicyProvider::setLBSNoCFSRatio + ); scopedSettings.addSettingsUpdateConsumer( MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING, @@ -903,6 +1015,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(FINAL_PIPELINE, this::setRequiredPipeline); scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING, this::setSoftDeleteRetentionOperations); scopedSettings.addSettingsUpdateConsumer(INDEX_SEARCH_THROTTLED, this::setSearchThrottled); + 
scopedSettings.addSettingsUpdateConsumer(INDEX_UNREFERENCED_FILE_CLEANUP, this::setShouldCleanupUnreferencedFiles); scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING, this::setRetentionLeaseMillis); scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING, this::setMappingNestedFieldsLimit); scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING, this::setMappingNestedDocsLimit); @@ -917,9 +1030,13 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setRemoteTranslogUploadBufferInterval ); + scopedSettings.addSettingsUpdateConsumer(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING, this::setRemoteTranslogKeepExtraGen); } private void setSearchIdleAfter(TimeValue searchIdleAfter) { + if (this.isRemoteStoreEnabled) { + logger.warn("Search idle is not supported for remote backed indices"); + } if (this.replicationType == ReplicationType.SEGMENT && this.getNumberOfReplicas() > 0) { logger.warn("Search idle is not supported for indices with replicas using 'replication.type: SEGMENT'"); } @@ -1044,11 +1161,11 @@ public boolean isSegRepEnabled() { } public boolean isSegRepLocalEnabled() { - return isSegRepEnabled() && !isSegRepWithRemoteEnabled(); + return isSegRepEnabled() && !isRemoteStoreEnabled(); } public boolean isSegRepWithRemoteEnabled() { - return isSegRepEnabled() && isRemoteStoreEnabled() && FeatureFlags.isEnabled(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL); + return isSegRepEnabled() && isRemoteStoreEnabled(); } /** @@ -1145,7 +1262,9 @@ public synchronized boolean updateIndexMetadata(IndexMetadata indexMetadata) { */ public static boolean same(final Settings left, final Settings right) { return left.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE) - .equals(right.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE)); + .equals(right.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE)) + && left.filter(IndexScopedSettings.ARCHIVED_SETTINGS_KEY_PREDICATE) + .equals(right.filter(IndexScopedSettings.ARCHIVED_SETTINGS_KEY_PREDICATE)); } /** @@ -1191,10 +1310,25 @@ public TimeValue getRemoteTranslogUploadBufferInterval() { return remoteTranslogUploadBufferInterval; } + public int getRemoteTranslogExtraKeep() { + return remoteTranslogKeepExtraGen; + } + + /** + * Returns true iff the remote translog buffer interval setting exists or in other words is explicitly set. + */ + public boolean isRemoteTranslogBufferIntervalExplicit() { + return INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.exists(settings); + } + public void setRemoteTranslogUploadBufferInterval(TimeValue remoteTranslogUploadBufferInterval) { this.remoteTranslogUploadBufferInterval = remoteTranslogUploadBufferInterval; } + public void setRemoteTranslogKeepExtraGen(int extraGen) { + this.remoteTranslogKeepExtraGen = extraGen; + } + /** * Returns this interval in which the shards of this index are asynchronously refreshed. {@code -1} means async refresh is disabled. */ @@ -1403,9 +1537,43 @@ public long getGcDeletesInMillis() { /** * Returns the merge policy that should be used for this index. 
- */ - public MergePolicy getMergePolicy() { - return mergePolicyConfig.getMergePolicy(); + * @param isTimeSeriesIndex true if index contains @timestamp field + */ + public MergePolicy getMergePolicy(boolean isTimeSeriesIndex) { + String indexScopedPolicy = scopedSettings.get(INDEX_MERGE_POLICY); + MergePolicyProvider mergePolicyProvider = null; + IndexMergePolicy indexMergePolicy = IndexMergePolicy.fromString(indexScopedPolicy); + switch (indexMergePolicy) { + case TIERED: + mergePolicyProvider = tieredMergePolicyProvider; + break; + case LOG_BYTE_SIZE: + mergePolicyProvider = logByteSizeMergePolicyProvider; + break; + case DEFAULT_POLICY: + if (isTimeSeriesIndex) { + String nodeScopedTimeSeriesIndexPolicy = TIME_SERIES_INDEX_MERGE_POLICY.get(nodeSettings); + IndexMergePolicy nodeMergePolicy = IndexMergePolicy.fromString(nodeScopedTimeSeriesIndexPolicy); + switch (nodeMergePolicy) { + case TIERED: + case DEFAULT_POLICY: + mergePolicyProvider = tieredMergePolicyProvider; + break; + case LOG_BYTE_SIZE: + mergePolicyProvider = logByteSizeMergePolicyProvider; + break; + } + } else { + mergePolicyProvider = tieredMergePolicyProvider; + } + break; + } + assert mergePolicyProvider != null : "should not happen as validation for invalid merge policy values " + + "are part of setting definition"; + if (logger.isTraceEnabled()) { + logger.trace("Index: " + this.index.getName() + ", Merge policy used: " + mergePolicyProvider); + } + return mergePolicyProvider.getMergePolicy(); } public T getValue(Setting setting) { @@ -1527,6 +1695,18 @@ private void setSearchThrottled(boolean searchThrottled) { this.searchThrottled = searchThrottled; } + /** + * Returns true if unreferenced files should be cleaned up on merge failure for this index. + * + */ + public boolean shouldCleanupUnreferencedFiles() { + return shouldCleanupUnreferencedFiles; + } + + private void setShouldCleanupUnreferencedFiles(boolean shouldCleanupUnreferencedFiles) { + this.shouldCleanupUnreferencedFiles = shouldCleanupUnreferencedFiles; + } + public long getMappingNestedFieldsLimit() { return mappingNestedFieldsLimit; } @@ -1584,7 +1764,7 @@ public boolean isMergeOnFlushEnabled() { } private void setMergeOnFlushPolicy(String policy) { - if (Strings.isEmpty(policy) || MERGE_ON_FLUSH_DEFAULT_POLICY.equalsIgnoreCase(policy)) { + if (Strings.isEmpty(policy) || DEFAULT_POLICY.equalsIgnoreCase(policy)) { mergeOnFlushPolicy = null; } else if (MERGE_ON_FLUSH_MERGE_POLICY.equalsIgnoreCase(policy)) { this.mergeOnFlushPolicy = MergeOnFlushMergePolicy::new; @@ -1595,7 +1775,7 @@ private void setMergeOnFlushPolicy(String policy) { + " has unsupported policy specified: " + policy + ". 
Please use one of: " - + MERGE_ON_FLUSH_DEFAULT_POLICY + + DEFAULT_POLICY + ", " + MERGE_ON_FLUSH_MERGE_POLICY ); @@ -1613,4 +1793,12 @@ public String getDefaultSearchPipeline() { public void setDefaultSearchPipeline(String defaultSearchPipeline) { this.defaultSearchPipeline = defaultSearchPipeline; } + + /** + * Returns true if we need to maintain backward compatibility for index sorted indices created prior to version 2.7 + * @return boolean + */ + public boolean shouldWidenIndexSortType() { + return this.widenIndexSortType; + } } diff --git a/server/src/main/java/org/opensearch/index/IndexSortConfig.java b/server/src/main/java/org/opensearch/index/IndexSortConfig.java index f73f96df4f9ad..9edb268a5126c 100644 --- a/server/src/main/java/org/opensearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/opensearch/index/IndexSortConfig.java @@ -53,7 +53,7 @@ /** * Holds all the information that is used to build the sort order of an index. - * + *
<p>
          * The index sort settings are final and can be defined only at index creation. * These settings are divided in four lists that are merged during the initialization of this class: *
<ul>
            @@ -200,6 +200,7 @@ public boolean hasPrimarySortOnField(String field) { * or returns null if this index has no sort. */ public Sort buildIndexSort( + boolean shouldWidenIndexSortType, Function fieldTypeLookup, BiFunction, IndexFieldData> fieldDataLookup ) { @@ -230,7 +231,11 @@ public Sort buildIndexSort( if (fieldData == null) { throw new IllegalArgumentException("docvalues not found for index sort field:[" + sortSpec.field + "]"); } - sortFields[i] = fieldData.sortField(sortSpec.missingValue, mode, null, reverse); + if (shouldWidenIndexSortType == true) { + sortFields[i] = fieldData.wideSortField(sortSpec.missingValue, mode, null, reverse); + } else { + sortFields[i] = fieldData.sortField(sortSpec.missingValue, mode, null, reverse); + } validateIndexSortField(sortFields[i]); } return new Sort(sortFields); diff --git a/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java b/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java new file mode 100644 index 0000000000000..0b762d781957c --- /dev/null +++ b/server/src/main/java/org/opensearch/index/LogByteSizeMergePolicyProvider.java @@ -0,0 +1,166 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.LogByteSizeMergePolicy; +import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.NoMergePolicy; +import org.opensearch.common.settings.Setting; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; + +import static org.apache.lucene.index.LogMergePolicy.DEFAULT_MAX_MERGE_DOCS; +import static org.apache.lucene.index.LogMergePolicy.DEFAULT_NO_CFS_RATIO; + +/** + *
<p>
+ * The LogByteSizeMergePolicy is an alternative merge policy primarily used here to optimize the merging of segments in scenarios + * involving indices with timestamps. + * While the TieredMergePolicy is the default choice, the LogByteSizeMergePolicy can be configured + * as the default merge policy for time-series indices via the indices.time_series_index.default_index_merge_policy node setting introduced in this change. + * + *
<p>
+ * Unlike the TieredMergePolicy, which prioritizes merging segments of equal sizes, the LogByteSizeMergePolicy + * specializes in merging adjacent segments efficiently. + * This characteristic makes it particularly well-suited for range queries on time-series data. + * Adjacent segments in time-series data typically contain documents with similar timestamps. + * When these segments are merged, the resulting segment covers a range of timestamps with reduced overlap compared + * to the adjacent segments. This reduced overlap remains even as segments grow older and larger, + * which can significantly benefit range queries on timestamps. + * + *
<p>
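A pure-Lucene sketch of the policy this provider wraps may help here; the values mirror DEFAULT_MERGE_FACTOR, DEFAULT_MIN_MERGE and DEFAULT_MAX_MERGED_SEGMENT defined in this file, while the standalone class and main() harness are assumptions for illustration:

    import org.apache.lucene.index.LogByteSizeMergePolicy;

    public class LogByteSizePolicyDefaults {
        public static void main(String[] args) {
            LogByteSizeMergePolicy policy = new LogByteSizeMergePolicy();
            policy.setMergeFactor(10);        // DEFAULT_MERGE_FACTOR: merge runs of 10 adjacent segments
            policy.setMinMergeMB(2.0);        // DEFAULT_MIN_MERGE: segments under 2 MB count as equal-sized
            policy.setMaxMergeMB(5 * 1024.0); // DEFAULT_MAX_MERGED_SEGMENT: leave segments over ~5 GB alone
            System.out.println(policy);
        }
    }

Because only adjacent runs are ever merged, segments stay roughly in timestamp order, which is the property the surrounding javadoc relies on.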
            + * In contrast, the TieredMergePolicy does not honor this timestamp range optimization. It focuses on merging segments + * of equal sizes and does not consider adjacency. Consequently, as segments grow older and larger, + * the overlap of timestamp ranges among adjacent segments managed by TieredMergePolicy can increase. + * This can lead to inefficiencies in range queries on timestamps, as the number of segments to be scanned + * within a given timestamp range could become high. + * + * @opensearch.internal + */ +public class LogByteSizeMergePolicyProvider implements MergePolicyProvider { + private final LogByteSizeMergePolicy logByteSizeMergePolicy = new LogByteSizeMergePolicy(); + + private final Logger logger; + private final boolean mergesEnabled; + + public static final ByteSizeValue DEFAULT_MIN_MERGE = new ByteSizeValue(2, ByteSizeUnit.MB); + public static final int DEFAULT_MERGE_FACTOR = 10; + + public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB); + + public static final ByteSizeValue DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE = new ByteSizeValue(Long.MAX_VALUE); + + public static final Setting INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING = Setting.intSetting( + "index.merge.log_byte_size_policy.merge_factor", + DEFAULT_MERGE_FACTOR, // keeping it same as default max merge at once for tiered merge policy + 2, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.min_merge", + DEFAULT_MIN_MERGE, // keeping it same as default floor segment for tiered merge policy + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MAX_MERGE_SEGMENT_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.max_merge_segment", + DEFAULT_MAX_MERGED_SEGMENT, // keeping default same as tiered merge policy + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING = Setting.byteSizeSetting( + "index.merge.log_byte_size_policy.max_merge_segment_forced_merge", + DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_MAX_MERGED_DOCS_SETTING = Setting.intSetting( + "index.merge.log_byte_size_policy.max_merged_docs", + DEFAULT_MAX_MERGE_DOCS, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + public static final Setting INDEX_LBS_NO_CFS_RATIO_SETTING = new Setting<>( + "index.merge.log_byte_size_policy.no_cfs_ratio", + Double.toString(DEFAULT_NO_CFS_RATIO), + TieredMergePolicyProvider::parseNoCFSRatio, + Setting.Property.Dynamic, + Setting.Property.IndexScope + ); + + LogByteSizeMergePolicyProvider(Logger logger, IndexSettings indexSettings) { + this.logger = logger; + this.mergesEnabled = indexSettings.getSettings().getAsBoolean(INDEX_MERGE_ENABLED, true); + + // Undocumented settings, works great with defaults + logByteSizeMergePolicy.setMergeFactor(indexSettings.getValue(INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING)); + logByteSizeMergePolicy.setMinMergeMB(indexSettings.getValue(INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING).getMbFrac()); + logByteSizeMergePolicy.setMaxMergeMB(indexSettings.getValue(INDEX_LBS_MAX_MERGE_SEGMENT_SETTING).getMbFrac()); + logByteSizeMergePolicy.setMaxMergeMBForForcedMerge( + 
indexSettings.getValue(INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING).getMbFrac() + ); + logByteSizeMergePolicy.setMaxMergeDocs(indexSettings.getValue(INDEX_LBS_MAX_MERGED_DOCS_SETTING)); + logByteSizeMergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_LBS_NO_CFS_RATIO_SETTING)); + } + + @Override + public MergePolicy getMergePolicy() { + return mergesEnabled ? logByteSizeMergePolicy : NoMergePolicy.INSTANCE; + } + + void setLBSMergeFactor(int mergeFactor) { + logByteSizeMergePolicy.setMergeFactor(mergeFactor); + } + + void setLBSMaxMergeSegment(ByteSizeValue maxMergeSegment) { + logByteSizeMergePolicy.setMaxMergeMB(maxMergeSegment.getMbFrac()); + } + + void setLBSMinMergedMB(ByteSizeValue minMergedSize) { + logByteSizeMergePolicy.setMinMergeMB(minMergedSize.getMbFrac()); + } + + void setLBSMaxMergeMBForForcedMerge(ByteSizeValue maxMergeForcedMerge) { + logByteSizeMergePolicy.setMaxMergeMBForForcedMerge(maxMergeForcedMerge.getMbFrac()); + } + + void setLBSMaxMergeDocs(int maxMergeDocs) { + logByteSizeMergePolicy.setMaxMergeDocs(maxMergeDocs); + } + + void setLBSNoCFSRatio(Double noCFSRatio) { + logByteSizeMergePolicy.setNoCFSRatio(noCFSRatio); + } + + @Override + public String toString() { + return "LogByteSizeMergePolicyProvider{" + + "mergeFactor=" + + logByteSizeMergePolicy.getMergeFactor() + + ", minMergeMB=" + + logByteSizeMergePolicy.getMinMergeMB() + + ", maxMergeMB=" + + logByteSizeMergePolicy.getMaxMergeMB() + + ", maxMergeMBForForcedMerge=" + + logByteSizeMergePolicy.getMaxMergeMBForForcedMerge() + + ", maxMergedDocs=" + + logByteSizeMergePolicy.getMaxMergeDocs() + + ", noCFSRatio=" + + logByteSizeMergePolicy.getNoCFSRatio() + + '}'; + } + +} diff --git a/server/src/main/java/org/opensearch/index/MergePolicyProvider.java b/server/src/main/java/org/opensearch/index/MergePolicyProvider.java new file mode 100644 index 0000000000000..6f734314f758f --- /dev/null +++ b/server/src/main/java/org/opensearch/index/MergePolicyProvider.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.apache.lucene.index.MergePolicy; +import org.opensearch.common.annotation.InternalApi; + +/** + * A provider for obtaining merge policies used by OpenSearch indexes. + * + * @opensearch.internal + */ + +@InternalApi +public interface MergePolicyProvider { + // don't convert to Setting<> and register... we only set this in tests and register via a plugin + String INDEX_MERGE_ENABLED = "index.merge.enabled"; + + /** + * Gets the merge policy to be used for index. + * + * @return The merge policy instance. + */ + MergePolicy getMergePolicy(); +} diff --git a/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java b/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java index 9e170b448d641..b2b7781a20d26 100644 --- a/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java +++ b/server/src/main/java/org/opensearch/index/MergeSchedulerConfig.java @@ -48,14 +48,14 @@ * *
<ul>
              *
<li>index.merge.scheduler.max_thread_count: - * + *
<p>
              * The maximum number of threads that may be merging at once. Defaults to * Math.max(1, Math.min(4, {@link OpenSearchExecutors#allocatedProcessors(Settings)} / 2)) * which works well for a good solid-state-disk (SSD). If your index is on * spinning platter drives instead, decrease this to 1. * *

<li>index.merge.scheduler.auto_throttle: - * + *
<p>
              * If this is true (the default), then the merge scheduler will rate-limit IO * (writes) for merges to an adaptive value depending on how many merges are * requested over time. An application with a low indexing rate that diff --git a/server/src/main/java/org/opensearch/index/OpenSearchTieredMergePolicy.java b/server/src/main/java/org/opensearch/index/OpenSearchTieredMergePolicy.java index 637282374de73..df1666e72f2ee 100644 --- a/server/src/main/java/org/opensearch/index/OpenSearchTieredMergePolicy.java +++ b/server/src/main/java/org/opensearch/index/OpenSearchTieredMergePolicy.java @@ -42,7 +42,7 @@ /** * Wrapper around {@link TieredMergePolicy} which doesn't respect - * {@link TieredMergePolicy#setMaxMergedSegmentMB(double)} on forced merges. + * {@link TieredMergePolicy#setMaxMergedSegmentMB(double)} on forced merges, but DOES respect it on only_expunge_deletes. * See https://issues.apache.org/jira/browse/LUCENE-7976. * * @opensearch.internal @@ -71,7 +71,7 @@ public MergeSpecification findForcedMerges( @Override public MergeSpecification findForcedDeletesMerges(SegmentInfos infos, MergeContext mergeContext) throws IOException { - return forcedMergePolicy.findForcedDeletesMerges(infos, mergeContext); + return regularMergePolicy.findForcedDeletesMerges(infos, mergeContext); } public void setForceMergeDeletesPctAllowed(double forceMergeDeletesPctAllowed) { @@ -80,7 +80,7 @@ public void setForceMergeDeletesPctAllowed(double forceMergeDeletesPctAllowed) { } public double getForceMergeDeletesPctAllowed() { - return forcedMergePolicy.getForceMergeDeletesPctAllowed(); + return regularMergePolicy.getForceMergeDeletesPctAllowed(); } public void setFloorSegmentMB(double mbFrac) { diff --git a/server/src/main/java/org/opensearch/index/ReplicationStats.java b/server/src/main/java/org/opensearch/index/ReplicationStats.java new file mode 100644 index 0000000000000..0ae4526365bf1 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/ReplicationStats.java @@ -0,0 +1,95 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * ReplicationStats is used to provide segment replication statistics at an index, + * node and cluster level on a segment replication enabled cluster. 
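Before the class body below, a small usage sketch of its aggregation contract; the rollup harness is hypothetical, while the constructor argument order (maxBytesBehind, totalBytesBehind, maxReplicationLag) and the add() semantics follow the code as added here:

    import org.opensearch.index.ReplicationStats;

    public class ReplicationStatsRollup {
        public static void main(String[] args) {
            ReplicationStats rollup = new ReplicationStats();
            // add() keeps the max of the maxima and sums the totals, per the method below.
            rollup.add(new ReplicationStats(1024, 1024, 250)); // shard 0
            rollup.add(new ReplicationStats(4096, 4096, 100)); // shard 1
            System.out.println(rollup.getMaxBytesBehind());    // 4096
            System.out.println(rollup.getTotalBytesBehind());  // 5120
            System.out.println(rollup.getMaxReplicationLag()); // 250
        }
    }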
+ * + * @opensearch.internal + */ +public class ReplicationStats implements ToXContentFragment, Writeable { + + public long maxBytesBehind; + public long maxReplicationLag; + public long totalBytesBehind; + + public ReplicationStats(long maxBytesBehind, long totalBytesBehind, long maxReplicationLag) { + this.maxBytesBehind = maxBytesBehind; + this.totalBytesBehind = totalBytesBehind; + this.maxReplicationLag = maxReplicationLag; + } + + public ReplicationStats(StreamInput in) throws IOException { + this.maxBytesBehind = in.readVLong(); + this.totalBytesBehind = in.readVLong(); + this.maxReplicationLag = in.readVLong(); + } + + public ReplicationStats() { + + } + + public void add(ReplicationStats other) { + if (other != null) { + maxBytesBehind = Math.max(other.maxBytesBehind, maxBytesBehind); + totalBytesBehind += other.totalBytesBehind; + maxReplicationLag = Math.max(other.maxReplicationLag, maxReplicationLag); + } + } + + public long getMaxBytesBehind() { + return this.maxBytesBehind; + } + + public long getTotalBytesBehind() { + return this.totalBytesBehind; + } + + public long getMaxReplicationLag() { + return this.maxReplicationLag; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(maxBytesBehind); + out.writeVLong(totalBytesBehind); + out.writeVLong(maxReplicationLag); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.SEGMENT_REPLICATION); + builder.field(Fields.MAX_BYTES_BEHIND, maxBytesBehind); + builder.field(Fields.TOTAL_BYTES_BEHIND, totalBytesBehind); + builder.field(Fields.MAX_REPLICATION_LAG, maxReplicationLag); + builder.endObject(); + return builder; + } + + /** + * Fields for segment replication statistics + * + * @opensearch.internal + */ + static final class Fields { + static final String SEGMENT_REPLICATION = "segment_replication"; + static final String MAX_BYTES_BEHIND = "max_bytes_behind"; + static final String TOTAL_BYTES_BEHIND = "total_bytes_behind"; + static final String MAX_REPLICATION_LAG = "max_replication_lag"; + } +} diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java index 812d67777fb2c..55f9061b56198 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationPressureService.java @@ -42,7 +42,8 @@ public class SegmentReplicationPressureService implements Closeable { private volatile boolean isSegmentReplicationBackpressureEnabled; private volatile int maxCheckpointsBehind; private volatile double maxAllowedStaleReplicas; - private volatile TimeValue maxReplicationTime; + private volatile TimeValue replicationTimeLimitBackpressure; + private volatile TimeValue replicationTimeLimitFailReplica; private static final Logger logger = LogManager.getLogger(SegmentReplicationPressureService.class); @@ -65,13 +66,23 @@ public class SegmentReplicationPressureService implements Closeable { Setting.Property.NodeScope ); - public static final Setting MAX_REPLICATION_TIME_SETTING = Setting.positiveTimeSetting( + // Time limit on max allowed replica staleness after which backpressure kicks in on primary. 
+ public static final Setting MAX_REPLICATION_TIME_BACKPRESSURE_SETTING = Setting.positiveTimeSetting( "segrep.pressure.time.limit", TimeValue.timeValueMinutes(5), Setting.Property.Dynamic, Setting.Property.NodeScope ); + // Time limit on max allowed replica staleness after which we start failing the replica shard. + // Defaults to 0(disabled) + public static final Setting MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING = Setting.positiveTimeSetting( + "segrep.replication.time.limit", + TimeValue.timeValueMinutes(0), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + public static final Setting MAX_ALLOWED_STALE_SHARDS = Setting.doubleSetting( "segrep.pressure.replica.stale.limit", .5, @@ -96,10 +107,11 @@ public SegmentReplicationPressureService( ClusterService clusterService, IndicesService indicesService, ShardStateAction shardStateAction, + SegmentReplicationStatsTracker tracker, ThreadPool threadPool ) { this.indicesService = indicesService; - this.tracker = new SegmentReplicationStatsTracker(this.indicesService); + this.tracker = tracker; this.shardStateAction = shardStateAction; this.threadPool = threadPool; @@ -114,8 +126,11 @@ public SegmentReplicationPressureService( this.maxCheckpointsBehind = MAX_INDEXING_CHECKPOINTS.get(settings); clusterSettings.addSettingsUpdateConsumer(MAX_INDEXING_CHECKPOINTS, this::setMaxCheckpointsBehind); - this.maxReplicationTime = MAX_REPLICATION_TIME_SETTING.get(settings); - clusterSettings.addSettingsUpdateConsumer(MAX_REPLICATION_TIME_SETTING, this::setMaxReplicationTime); + this.replicationTimeLimitBackpressure = MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING, this::setReplicationTimeLimitBackpressure); + + this.replicationTimeLimitFailReplica = MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING, this::setReplicationTimeLimitFailReplica); this.maxAllowedStaleReplicas = MAX_ALLOWED_STALE_SHARDS.get(settings); clusterSettings.addSettingsUpdateConsumer(MAX_ALLOWED_STALE_SHARDS, this::setMaxAllowedStaleReplicas); @@ -139,7 +154,7 @@ public void isSegrepLimitBreached(ShardId shardId) { } private void validateReplicationGroup(IndexShard shard) { - final Set replicaStats = shard.getReplicationStats(); + final Set replicaStats = shard.getReplicationStatsForTrackedReplicas(); final Set staleReplicas = getStaleReplicas(replicaStats); if (staleReplicas.isEmpty() == false) { // inSyncIds always considers the primary id, so filter it out. 
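A JDK-only sketch of the staleness filter wired in above; the ReplicaLag holder and the literal thresholds are hypothetical stand-ins, while the two filter conditions mirror getStaleReplicas (checkpoints behind above the limit and replication time past segrep.pressure.time.limit):

    import java.util.Arrays;
    import java.util.List;
    import java.util.stream.Collectors;

    public class StaleReplicaFilterSketch {
        static class ReplicaLag {
            final String allocationId;
            final long checkpointsBehind;
            final long replicationTimeMillis;

            ReplicaLag(String allocationId, long checkpointsBehind, long replicationTimeMillis) {
                this.allocationId = allocationId;
                this.checkpointsBehind = checkpointsBehind;
                this.replicationTimeMillis = replicationTimeMillis;
            }
        }

        // Mirrors getStaleReplicas: a replica is stale only when BOTH limits are exceeded.
        static List<ReplicaLag> staleReplicas(List<ReplicaLag> replicas, long maxCheckpointsBehind, long backpressureLimitMillis) {
            return replicas.stream()
                .filter(r -> r.checkpointsBehind > maxCheckpointsBehind)
                .filter(r -> r.replicationTimeMillis > backpressureLimitMillis)
                .collect(Collectors.toList());
        }

        public static void main(String[] args) {
            List<ReplicaLag> replicas = Arrays.asList(
                new ReplicaLag("a1", 6, 400_000L),  // behind on checkpoints and past the time limit
                new ReplicaLag("a2", 1, 10_000L)    // healthy
            );
            // with an assumed checkpoint limit of 4 and the 5-minute default (300_000 ms), only a1 is stale
            System.out.println(staleReplicas(replicas, 4, 300_000L).get(0).allocationId);
        }
    }

Note how backpressure and replica failure use separate limits: the failure path stays disabled while segrep.replication.time.limit is 0.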
@@ -159,7 +174,7 @@ private void validateReplicationGroup(IndexShard shard) { private Set getStaleReplicas(final Set replicas) { return replicas.stream() .filter(entry -> entry.getCheckpointsBehindCount() > maxCheckpointsBehind) - .filter(entry -> entry.getCurrentReplicationTimeMillis() > maxReplicationTime.millis()) + .filter(entry -> entry.getCurrentReplicationTimeMillis() > replicationTimeLimitBackpressure.millis()) .collect(Collectors.toSet()); } @@ -187,8 +202,12 @@ public void setMaxAllowedStaleReplicas(double maxAllowedStaleReplicas) { this.maxAllowedStaleReplicas = maxAllowedStaleReplicas; } - public void setMaxReplicationTime(TimeValue maxReplicationTime) { - this.maxReplicationTime = maxReplicationTime; + public void setReplicationTimeLimitFailReplica(TimeValue replicationTimeLimitFailReplica) { + this.replicationTimeLimitFailReplica = replicationTimeLimitFailReplica; + } + + public void setReplicationTimeLimitBackpressure(TimeValue replicationTimeLimitBackpressure) { + this.replicationTimeLimitBackpressure = replicationTimeLimitBackpressure; } @Override @@ -216,7 +235,8 @@ protected boolean mustReschedule() { @Override protected void runInternal() { - if (pressureService.isSegmentReplicationBackpressureEnabled) { + // Do not fail the replicas if time limit is set to 0 (i.e. disabled). + if (TimeValue.ZERO.equals(pressureService.replicationTimeLimitFailReplica) == false) { final SegmentReplicationStats stats = pressureService.tracker.getStats(); // Find the shardId in node which is having stale replicas with highest current replication time. @@ -242,7 +262,7 @@ protected void runInternal() { } final IndexShard primaryShard = indexService.getShard(shardId.getId()); for (SegmentReplicationShardStats staleReplica : staleReplicas) { - if (staleReplica.getCurrentReplicationTimeMillis() > 2 * pressureService.maxReplicationTime.millis()) { + if (staleReplica.getCurrentReplicationTimeMillis() > pressureService.replicationTimeLimitFailReplica.millis()) { pressureService.shardStateAction.remoteShardFailed( shardId, staleReplica.getAllocationId(), diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java b/server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java new file mode 100644 index 0000000000000..492f253bbcb7c --- /dev/null +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationRejectionStats.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.opensearch.Version; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Segment replication rejection stats. 
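The class below gates its serialization on Version.V_2_12_0. A toy sketch of that pattern, using JDK streams as stand-ins for StreamOutput and a plain integer as a stand-in for the version comparison (both assumptions for illustration only):

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class VersionGatedWriteSketch {
        public static void main(String[] args) throws IOException {
            int peerWireVersion = 2_12_00;          // stand-in for out.getVersion()
            long totalRejectionCount = 7;
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (DataOutputStream out = new DataOutputStream(bytes)) {
                if (peerWireVersion >= 2_12_00) {   // stand-in for onOrAfter(Version.V_2_12_0)
                    out.writeLong(totalRejectionCount);
                }
            }
            System.out.println(bytes.size());       // 8 for 2.12+ peers, 0 for older ones
        }
    }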
+ * + * @opensearch.internal + */ +public class SegmentReplicationRejectionStats implements Writeable, ToXContentFragment { + + /** + * Total rejections due to segment replication backpressure + */ + private long totalRejectionCount; + + public SegmentReplicationRejectionStats(final long totalRejectionCount) { + this.totalRejectionCount = totalRejectionCount; + } + + public SegmentReplicationRejectionStats(StreamInput in) throws IOException { + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + this.totalRejectionCount = in.readVLong(); + } + } + + public long getTotalRejectionCount() { + return totalRejectionCount; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("segment_replication_backpressure"); + builder.field("total_rejected_requests", totalRejectionCount); + return builder.endObject(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeVLong(totalRejectionCount); + } + } + + @Override + public String toString() { + return "SegmentReplicationRejectionStats{ totalRejectedRequestCount=" + totalRejectionCount + '}'; + } + +} diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationShardStats.java b/server/src/main/java/org/opensearch/index/SegmentReplicationShardStats.java index 66bc2934f5391..2be0c712f64ef 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationShardStats.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationShardStats.java @@ -29,6 +29,10 @@ public class SegmentReplicationShardStats implements Writeable, ToXContentFragme private final String allocationId; private final long checkpointsBehindCount; private final long bytesBehindCount; + // Total Replication lag observed. + private final long currentReplicationLagMillis; + // Total time taken for replicas to catch up. Similar to replication lag except this + // doesn't include time taken by primary to upload data to remote store. private final long currentReplicationTimeMillis; private final long lastCompletedReplicationTimeMillis; @@ -40,12 +44,14 @@ public SegmentReplicationShardStats( long checkpointsBehindCount, long bytesBehindCount, long currentReplicationTimeMillis, + long currentReplicationLagMillis, long lastCompletedReplicationTime ) { this.allocationId = allocationId; this.checkpointsBehindCount = checkpointsBehindCount; this.bytesBehindCount = bytesBehindCount; this.currentReplicationTimeMillis = currentReplicationTimeMillis; + this.currentReplicationLagMillis = currentReplicationLagMillis; this.lastCompletedReplicationTimeMillis = lastCompletedReplicationTime; } @@ -55,6 +61,7 @@ public SegmentReplicationShardStats(StreamInput in) throws IOException { this.bytesBehindCount = in.readVLong(); this.currentReplicationTimeMillis = in.readVLong(); this.lastCompletedReplicationTimeMillis = in.readVLong(); + this.currentReplicationLagMillis = in.readVLong(); } public String getAllocationId() { @@ -73,6 +80,19 @@ public long getCurrentReplicationTimeMillis() { return currentReplicationTimeMillis; } + /** + * Total Replication lag observed. + * @return currentReplicationLagMillis + */ + public long getCurrentReplicationLagMillis() { + return currentReplicationLagMillis; + } + + /** + * Total time taken for replicas to catch up. Similar to replication lag except this doesn't include time taken by + * primary to upload data to remote store. 
+ * @return lastCompletedReplicationTimeMillis + */ public long getLastCompletedReplicationTimeMillis() { return lastCompletedReplicationTimeMillis; } @@ -93,6 +113,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("checkpoints_behind", checkpointsBehindCount); builder.field("bytes_behind", new ByteSizeValue(bytesBehindCount).toString()); builder.field("current_replication_time", new TimeValue(currentReplicationTimeMillis)); + builder.field("current_replication_lag", new TimeValue(currentReplicationLagMillis)); builder.field("last_completed_replication_time", new TimeValue(lastCompletedReplicationTimeMillis)); if (currentReplicationState != null) { builder.startObject(); @@ -110,6 +131,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(bytesBehindCount); out.writeVLong(currentReplicationTimeMillis); out.writeVLong(lastCompletedReplicationTimeMillis); + out.writeVLong(currentReplicationLagMillis); } @Override @@ -121,6 +143,8 @@ public String toString() { + checkpointsBehindCount + ", bytesBehindCount=" + bytesBehindCount + + ", currentReplicationLagMillis=" + + currentReplicationLagMillis + ", currentReplicationTimeMillis=" + currentReplicationTimeMillis + ", lastCompletedReplicationTimeMillis=" diff --git a/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java b/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java index 2255bb17d364f..f5fc8aa1c1eea 100644 --- a/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java +++ b/server/src/main/java/org/opensearch/index/SegmentReplicationStatsTracker.java @@ -33,6 +33,14 @@ public SegmentReplicationStatsTracker(IndicesService indicesService) { rejectionCount = ConcurrentCollections.newConcurrentMap(); } + public SegmentReplicationRejectionStats getTotalRejectionStats() { + return new SegmentReplicationRejectionStats(this.rejectionCount.values().stream().mapToInt(AtomicInteger::get).sum()); + } + + protected Map getRejectionCount() { + return rejectionCount; + } + public SegmentReplicationStats getStats() { Map stats = new HashMap<>(); for (IndexService indexService : indicesService) { @@ -59,7 +67,7 @@ public void incrementRejectionCount(ShardId shardId) { public SegmentReplicationPerGroupStats getStatsForShard(IndexShard indexShard) { return new SegmentReplicationPerGroupStats( indexShard.shardId(), - indexShard.getReplicationStats(), + indexShard.getReplicationStatsForTrackedReplicas(), Optional.ofNullable(rejectionCount.get(indexShard.shardId())).map(AtomicInteger::get).orElse(0) ); } diff --git a/server/src/main/java/org/opensearch/index/ShardIndexingPressure.java b/server/src/main/java/org/opensearch/index/ShardIndexingPressure.java index 07768c6769b71..a6135186fb5ff 100644 --- a/server/src/main/java/org/opensearch/index/ShardIndexingPressure.java +++ b/server/src/main/java/org/opensearch/index/ShardIndexingPressure.java @@ -34,7 +34,7 @@ * Interfaces returns Releasable which when triggered will release the acquired accounting tokens values and also * perform necessary actions such as throughput evaluation once the request completes. * Consumers of these interfaces are expected to trigger close on releasable, reliably for consistency. - * + *
<p>
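Relatedly, getTotalRejectionStats() in the SegmentReplicationStatsTracker hunk above folds the per-shard AtomicInteger counters into one node-level total; a JDK-only sketch of that aggregation, with hypothetical shard keys:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicInteger;

    public class RejectionCountSketch {
        public static void main(String[] args) {
            Map<String, AtomicInteger> rejectionCount = new ConcurrentHashMap<>();
            rejectionCount.computeIfAbsent("shard-0", k -> new AtomicInteger()).incrementAndGet();
            rejectionCount.computeIfAbsent("shard-1", k -> new AtomicInteger()).addAndGet(2);
            // same fold as getTotalRejectionStats(): sum every per-shard counter
            int total = rejectionCount.values().stream().mapToInt(AtomicInteger::get).sum();
            System.out.println(total); // 3
        }
    }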
* Overall ShardIndexingPressure provides: * 1. Memory Accounting at shard level. This can be enabled/disabled based on a dynamic setting. * 2. Memory Accounting at Node level. Tracking is done using the IndexingPressure artefacts to support seamless feature toggling. diff --git a/server/src/main/java/org/opensearch/index/ShardIndexingPressureMemoryManager.java b/server/src/main/java/org/opensearch/index/ShardIndexingPressureMemoryManager.java index dea3cc8970cb5..e5c1af2e9c9f0 100644 --- a/server/src/main/java/org/opensearch/index/ShardIndexingPressureMemoryManager.java +++ b/server/src/main/java/org/opensearch/index/ShardIndexingPressureMemoryManager.java @@ -30,16 +30,16 @@ * The Shard Indexing Pressure Memory Manager is the construct responsible for increasing and decreasing the allocated shard limit * based on incoming requests. A shard limit defines the maximum memory that a shard can occupy in the heap for request objects. - * + *
<p>
* Based on the overall memory utilization on the node and current traffic needs, shard limits will be modified: - * + *
<p>
* 1. If the limit assigned to a shard is breached (Primary Parameter) while the node level overall occupancy across all shards * is not greater than primary_parameter.node.soft_limit, MemoryManager will increase the shard limits without any deeper evaluation. * 2. If the limit assigned to the shard is breached (Primary Parameter) and the node level overall occupancy across all shards * is greater than primary_parameter.node.soft_limit, then MemoryManager will evaluate deeper parameters for shards to identify any * issues, such as throughput degradation (Secondary Parameter - 1) and time since last request was successful (Secondary Parameter - 2). * This helps detect any duress state of the shard requesting more memory. - * + *
<p>
* Secondary Parameters covered above: * 1. ThroughputDegradationLimitsBreached - When the moving window throughput average has increased by a factor compared to * the historical throughput average. If the factor by which it has increased is greater than the degradation limit threshold, this * parameter is considered to be breached. * 2. LastSuccessfulRequestDurationLimitsBreached - When the time since the last successful request completed is greater than the max * timeout threshold value, while there are more outstanding requests than the max outstanding requests allowed, then this parameter * is considered to be breached. - * + *
<p>
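The next paragraph describes the operating-factor rebalance; a toy, JDK-only sketch of that arithmetic follows, where the factor values and byte counts are illustrative rather than the actual defaults:

    public class OperatingFactorSketch {
        public static void main(String[] args) {
            double lower = 0.75, upper = 0.85, optimal = 0.80; // illustrative operating factors
            long shardLimitBytes = 10_000_000L;
            long occupiedBytes = 9_000_000L;
            double utilization = (double) occupiedBytes / shardLimitBytes; // 0.90, above upper
            if (utilization < lower || utilization > upper) {
                // resize so the current occupancy sits at the optimal utilization
                shardLimitBytes = (long) (occupiedBytes / optimal);        // 11_250_000
            }
            System.out.println(shardLimitBytes);
        }
    }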
* MemoryManager attempts to increase or decrease the shard limits in case the shard utilization goes below operating_factor.lower or * goes above operating_factor.upper of current shard limits. MemoryManager attempts to update the new shard limit such that the new value * remains within the operating_factor.optimal range of current shard utilization. diff --git a/server/src/main/java/org/opensearch/index/ShardIndexingPressureStore.java b/server/src/main/java/org/opensearch/index/ShardIndexingPressureStore.java index b41dd1359394b..9b24d119f24fd 100644 --- a/server/src/main/java/org/opensearch/index/ShardIndexingPressureStore.java +++ b/server/src/main/java/org/opensearch/index/ShardIndexingPressureStore.java @@ -22,24 +22,24 @@ * Shard indexing pressure store acts as a central repository for all the shard-level tracker objects currently being * used at the Node level, for tracking indexing pressure requests. * Store manages the tracker lifecycle, from creation, access, until it is evicted to be collected. - * + *
<p>
              * Trackers are maintained at two levels for access simplicity and better memory management: - * + *
<p>
* 1. shardIndexingPressureHotStore : As the name suggests, it is the hot store for tracker objects which are currently live i.e. being used * to track an ongoing request. - * + *
<p>
* 2. shardIndexingPressureColdStore : This acts as the store for all the shard tracking objects which are currently being used * by the framework. In addition to hot trackers, recently used trackers which, although not currently live, may be used again * in the near future, are also part of this store. To limit any memory implications, this store has an upper limit on the maximum number of * trackers it can hold at any given time, which is a configurable dynamic setting. - * + *
<p>
* Tracking objects, when created, are part of both the hot store and the cold store. However, once the object * is no longer live it is removed from the hot store. Objects in the cold store are evicted once the cold store * reaches its maximum limit. Think of it like a periodic purge when the upper limit is hit. * During get, if the tracking object is not present in the hot store, a lookup is made into the cold store. If found, the * object is brought into the hot store again and stays there while it remains active. If not present in either store, a fresh * object is instantiated and registered in both stores for concurrent accesses. - * + *
<p>
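A toy sketch of the two-level lookup described above; plain JDK maps stand in for the stores, Object for the tracker type, and eviction and sizing are elided:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class TwoLevelStoreSketch {
        static final Map<Long, Object> hotStore = new ConcurrentHashMap<>();
        static final Map<Long, Object> coldStore = new ConcurrentHashMap<>();

        static Object getTracker(long shardKey) {
            // hot first; on miss, revive from cold or create fresh, registering in both stores
            return hotStore.computeIfAbsent(shardKey,
                key -> coldStore.computeIfAbsent(key, k -> new Object()));
        }

        public static void main(String[] args) {
            Object first = getTracker(42L);
            hotStore.remove(42L);                         // tracker goes cold once no longer live
            System.out.println(first == getTracker(42L)); // true: revived from the cold store
        }
    }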
* Note: The implementation of the shardIndexingPressureColdStore methods is such that the get, * update and evict operations can be abstracted out to support any other strategy, such as LRU, if * a need is discovered later. - * + *
<p>
* There are four kinds of operation tracking on a node which need to be performed for a shard: * 1. Coordinating Operation : To track all the individual shard bulk requests on the coordinator node. * 2. Primary Operation : To track all the individual shard bulk requests on the primary node. * 3. Replica Operation : To track all the individual shard bulk requests on the replica node. * 4. Common Operation : To track values applicable across the specific shard role. - * + *

* ShardIndexingPressureTracker therefore provides the construct to track all the write requests targeted for a ShardId on the node, * across all possible transport-write-actions, i.e. Coordinator, Primary and Replica. * A tracker is uniquely identified against a Shard-Id on the node. Currently, the knowledge of shard roles (such as primary vs replica) * is not explicit to the tracker, and it is able to track different values simultaneously based on the interaction hooks of the * operation type, i.e. write-action layers (see the sketch below). - * + *

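As referenced above, a minimal sketch of such a tracker might look as follows. The names are hypothetical and the real ShardIndexingPressureTracker tracks many more values; the sketch only shows one group of counters per operation type, plus a common group shared across the shard roles.

import java.util.concurrent.atomic.AtomicLong;

final class SimpleShardTracker {
    // One group of counters per operation kind.
    static final class OperationTracker {
        final AtomicLong currentBytes = new AtomicLong();
        final AtomicLong totalRequests = new AtomicLong();
    }

    private final String shardId; // the tracker is uniquely identified by shard id

    final OperationTracker coordinating = new OperationTracker();
    final OperationTracker primary = new OperationTracker();
    final OperationTracker replica = new OperationTracker();
    final OperationTracker common = new OperationTracker(); // values shared across roles

    SimpleShardTracker(String shardId) {
        this.shardId = shardId;
    }

    String shardId() {
        return shardId;
    }

    // Each write-action layer invokes the hook for its own operation type, so the
    // tracker itself needs no explicit knowledge of the shard's current role.
    long markCoordinatingOperationStarted(long bytes) {
        coordinating.totalRequests.incrementAndGet();
        common.totalRequests.incrementAndGet();
        return coordinating.currentBytes.addAndGet(bytes);
    }
}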
* There is room for introducing more unique identity to the trackers based on Shard-Role or Shard-Allocation-Id, but that will also * increase the complexity of handling shard-listener events and handling other race scenarios such as request-draining etc. * To prefer simplicity, we have modelled this by keeping explicit fields for different operation tracking, while the tracker by itself is diff --git a/server/src/main/java/org/opensearch/index/MergePolicyConfig.java b/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java similarity index 82% rename from server/src/main/java/org/opensearch/index/MergePolicyConfig.java rename to server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java index fe2af21dfe039..d5d354c6c960a 100644 --- a/server/src/main/java/org/opensearch/index/MergePolicyConfig.java +++ b/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java @@ -33,6 +33,7 @@ package org.opensearch.index; import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.TieredMergePolicy; @@ -47,9 +48,12 @@ * where the index data is stored, and are immutable up to delete markers. * Segments are, periodically, merged into larger segments to keep the * index size at bay and expunge deletes. + * This class customizes and exposes two merge policies from Lucene - + * {@link LogByteSizeMergePolicy} and {@link TieredMergePolicy}. + * * *

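As a preview of the tiered policy described next, the following hedged sketch shows the Lucene TieredMergePolicy knobs that this provider wraps. The two DEFAULT_* values are taken from constants visible in this diff; every other value is purely illustrative, not an OpenSearch default.

import org.apache.lucene.index.TieredMergePolicy;

public class TieredMergePolicySketch {
    public static TieredMergePolicy sketchPolicy() {
        TieredMergePolicy policy = new TieredMergePolicy();
        policy.setSegmentsPerTier(10.0d);             // DEFAULT_SEGMENTS_PER_TIER in this diff
        policy.setDeletesPctAllowed(20.0d);           // DEFAULT_DELETES_PCT_ALLOWED in this diff
        policy.setFloorSegmentMB(2.0d);               // illustrative floor segment size
        policy.setMaxMergedSegmentMB(5 * 1024.0d);    // illustrative cap on merged segment size
        policy.setForceMergeDeletesPctAllowed(10.0d); // illustrative expunge-deletes threshold
        policy.setNoCFSRatio(TieredMergePolicy.DEFAULT_NO_CFS_RATIO); // compound-file ratio, as in the diff
        return policy;
    }
}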
- * Merges select segments of approximately equal size, subject to an allowed + * The tiered merge policy selects segments of approximately equal size, subject to an allowed * number of segments per tier. The merge policy is able to merge * non-adjacent segments, and separates how many segments are merged at once from how many * segments are allowed per tier. It also does not over-merge (i.e., cascade merges). @@ -125,8 +129,9 @@ * @opensearch.internal */ -public final class MergePolicyConfig { - private final OpenSearchTieredMergePolicy mergePolicy = new OpenSearchTieredMergePolicy(); +public final class TieredMergePolicyProvider implements MergePolicyProvider { + private final OpenSearchTieredMergePolicy tieredMergePolicy = new OpenSearchTieredMergePolicy(); + private final Logger logger; private final boolean mergesEnabled; @@ -137,10 +142,11 @@ public final class MergePolicyConfig { public static final double DEFAULT_SEGMENTS_PER_TIER = 10.0d; public static final double DEFAULT_RECLAIM_DELETES_WEIGHT = 2.0d; public static final double DEFAULT_DELETES_PCT_ALLOWED = 20.0d; + public static final Setting<Double> INDEX_COMPOUND_FORMAT_SETTING = new Setting<>( "index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), - MergePolicyConfig::parseNoCFSRatio, + TieredMergePolicyProvider::parseNoCFSRatio, Property.Dynamic, Property.IndexScope ); @@ -194,10 +200,8 @@ public final class MergePolicyConfig { Property.Dynamic, Property.IndexScope ); - // don't convert to Setting<> and register... we only set this in tests and register via a plugin - public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; - MergePolicyConfig(Logger logger, IndexSettings indexSettings) { + TieredMergePolicyProvider(Logger logger, IndexSettings indexSettings) { this.logger = logger; double forceMergeDeletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING); // percentage ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING); @@ -216,54 +220,41 @@ public final class MergePolicyConfig { ); } maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier); - mergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING)); - mergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed); - mergePolicy.setFloorSegmentMB(floorSegment.getMbFrac()); - mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); - mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); - mergePolicy.setSegmentsPerTier(segmentsPerTier); - mergePolicy.setDeletesPctAllowed(deletesPctAllowed); - if (logger.isTraceEnabled()) { - logger.trace( - "using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], floor_segment[{}]," - + " max_merge_at_once[{}], max_merged_segment[{}], segments_per_tier[{}]," - + " deletes_pct_allowed[{}]", - forceMergeDeletesPctAllowed, - floorSegment, - maxMergeAtOnce, - maxMergedSegment, - segmentsPerTier, - deletesPctAllowed - ); - } + tieredMergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING)); + tieredMergePolicy.setForceMergeDeletesPctAllowed(forceMergeDeletesPctAllowed); + tieredMergePolicy.setFloorSegmentMB(floorSegment.getMbFrac()); + tieredMergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); + tieredMergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); + tieredMergePolicy.setSegmentsPerTier(segmentsPerTier); + tieredMergePolicy.setDeletesPctAllowed(deletesPctAllowed); } void setSegmentsPerTier(Double segmentsPerTier) { -
mergePolicy.setSegmentsPerTier(segmentsPerTier); + tieredMergePolicy.setSegmentsPerTier(segmentsPerTier); } void setMaxMergedSegment(ByteSizeValue maxMergedSegment) { - mergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); + tieredMergePolicy.setMaxMergedSegmentMB(maxMergedSegment.getMbFrac()); } void setMaxMergesAtOnce(Integer maxMergeAtOnce) { - mergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); + tieredMergePolicy.setMaxMergeAtOnce(maxMergeAtOnce); } void setFloorSegmentSetting(ByteSizeValue floorSegementSetting) { - mergePolicy.setFloorSegmentMB(floorSegementSetting.getMbFrac()); + tieredMergePolicy.setFloorSegmentMB(floorSegementSetting.getMbFrac()); } void setExpungeDeletesAllowed(Double value) { - mergePolicy.setForceMergeDeletesPctAllowed(value); + tieredMergePolicy.setForceMergeDeletesPctAllowed(value); } void setNoCFSRatio(Double noCFSRatio) { - mergePolicy.setNoCFSRatio(noCFSRatio); + tieredMergePolicy.setNoCFSRatio(noCFSRatio); } void setDeletesPctAllowed(Double deletesPctAllowed) { - mergePolicy.setDeletesPctAllowed(deletesPctAllowed); + tieredMergePolicy.setDeletesPctAllowed(deletesPctAllowed); } private int adjustMaxMergeAtOnceIfNeeded(int maxMergeAtOnce, double segmentsPerTier) { @@ -285,11 +276,11 @@ private int adjustMaxMergeAtOnceIfNeeded(int maxMergeAtOnce, double segmentsPerT return maxMergeAtOnce; } - MergePolicy getMergePolicy() { - return mergesEnabled ? mergePolicy : NoMergePolicy.INSTANCE; + public MergePolicy getMergePolicy() { + return mergesEnabled ? tieredMergePolicy : NoMergePolicy.INSTANCE; } - private static double parseNoCFSRatio(String noCFSRatio) { + public static double parseNoCFSRatio(String noCFSRatio) { noCFSRatio = noCFSRatio.trim(); if (noCFSRatio.equalsIgnoreCase("true")) { return 1.0d; @@ -310,4 +301,23 @@ private static double parseNoCFSRatio(String noCFSRatio) { } } } + + @Override + public String toString() { + return "TieredMergePolicyProvider{" + + "expungeDeletesAllowed=" + + tieredMergePolicy.getForceMergeDeletesPctAllowed() + + ", floorSegment=" + + tieredMergePolicy.getFloorSegmentMB() + + ", maxMergeAtOnce=" + + tieredMergePolicy.getMaxMergeAtOnce() + + ", maxMergedSegment=" + + tieredMergePolicy.getMaxMergedSegmentMB() + + ", segmentsPerTier=" + + tieredMergePolicy.getSegmentsPerTier() + + ", deletesPctAllowed=" + + tieredMergePolicy.getDeletesPctAllowed() + + '}'; + } + } diff --git a/server/src/main/java/org/opensearch/index/VersionType.java b/server/src/main/java/org/opensearch/index/VersionType.java index 3ce7f600a6a5b..8aa1fbd1b81ac 100644 --- a/server/src/main/java/org/opensearch/index/VersionType.java +++ b/server/src/main/java/org/opensearch/index/VersionType.java @@ -244,7 +244,7 @@ public byte getValue() { /** * Returns a human readable explanation for a version conflict on write. - * + *

              * Note that this method is only called if {@link #isVersionConflictForWrites(long, long, boolean)} returns true; * * @param currentVersion the current version for the document @@ -265,7 +265,7 @@ public byte getValue() { /** * Returns a human readable explanation for a version conflict on read. - * + *

* Note that this method is only called if {@link #isVersionConflictForReads(long, long)} returns true; * * @param currentVersion the current version for the document diff --git a/server/src/main/java/org/opensearch/index/analysis/Analysis.java b/server/src/main/java/org/opensearch/index/analysis/Analysis.java index 0062e4d8fbe05..6294230a02ada 100644 --- a/server/src/main/java/org/opensearch/index/analysis/Analysis.java +++ b/server/src/main/java/org/opensearch/index/analysis/Analysis.java @@ -87,6 +87,7 @@ import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.regex.Pattern; import static java.util.Collections.unmodifiableMap; @@ -98,6 +99,9 @@ public class Analysis { private static final Logger LOGGER = LogManager.getLogger(Analysis.class); + // Matches mapping rules that start with '#' followed by "=>", so that such hashtag rules are not stripped as comments + private static final Pattern HASH_TAG_RULE_PATTERN = Pattern.compile("^\\s*#\\s*=>"); + public static CharArraySet parseStemExclusion(Settings settings, CharArraySet defaultStemExclusion) { String value = settings.get("stem_exclusion"); if ("_none_".equals(value)) { @@ -222,16 +226,6 @@ public static <T> List<T> parseWordList(Environment env, Settings settings, Stri return parseWordList(env, settings, settingPrefix + "_path", settingPrefix, parser); } - public static <T> List<T> parseWordList( - Environment env, - Settings settings, - String settingPrefix, - CustomMappingRuleParser<T> parser, - boolean removeComments - ) { - return parseWordList(env, settings, settingPrefix + "_path", settingPrefix, parser, removeComments); - } - /** * Parses a list of words from the specified settings or from a file, with the given parser. * @@ -246,17 +240,6 @@ public static <T> List<T> parseWordList( String settingPath, String settingList, CustomMappingRuleParser<T> parser ) { - return parseWordList(env, settings, settingPath, settingList, parser, true); - } - - public static <T> List<T> parseWordList( - Environment env, - Settings settings, - String settingPath, - String settingList, - CustomMappingRuleParser<T> parser, - boolean removeComments ) { List<String> words = getWordList(env, settings, settingPath, settingList); if (words == null) { @@ -266,7 +249,7 @@ public static <T> List<T> parseWordList( int lineNum = 0; for (String word : words) { lineNum++; - if (removeComments == false || word.startsWith("#") == false) { + if (word.startsWith("#") == false || HASH_TAG_RULE_PATTERN.matcher(word).find() == true) { try { rules.add(parser.apply(word)); } catch (RuntimeException ex) { diff --git a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java index 4f11eacb3178a..6e84264ced803 100644 --- a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java @@ -247,7 +247,7 @@ public IndexAnalyzers build(IndexSettings indexSettings) throws IOException { /** * Creates a custom analyzer from a collection of {@link NameOrDefinition} specifications for each component - * + *

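Back to the Analysis.parseWordList change above: the new HASH_TAG_RULE_PATTERN keeps mapping rules whose left-hand side is the '#' character, which previously required the removeComments flag to be disabled. A small self-contained check of the regex (the rule strings are made up for illustration):

import java.util.regex.Pattern;

public class HashRuleCheck {
    // Same pattern as in the diff: optional leading spaces, '#', optional spaces, then the "=>" arrow.
    private static final Pattern HASH_TAG_RULE_PATTERN = Pattern.compile("^\\s*#\\s*=>");

    public static void main(String[] args) {
        // A hypothetical mapping rule for '#': kept, because it matches the pattern.
        System.out.println(HASH_TAG_RULE_PATTERN.matcher("# => HASH").find());        // true
        // An ordinary comment line: still stripped, since no "=>" follows the '#'.
        System.out.println(HASH_TAG_RULE_PATTERN.matcher("# just a comment").find()); // false
        // Rules not starting with '#' never reach this check at all.
        System.out.println(HASH_TAG_RULE_PATTERN.matcher("a => b").find());           // false
    }
}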
              * Callers are responsible for closing the returned Analyzer */ public NamedAnalyzer buildCustomAnalyzer( diff --git a/server/src/main/java/org/opensearch/index/analysis/NormalizingCharFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/NormalizingCharFilterFactory.java index ab8d23339029c..30fe31105e1d9 100644 --- a/server/src/main/java/org/opensearch/index/analysis/NormalizingCharFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/NormalizingCharFilterFactory.java @@ -36,7 +36,7 @@ /** * A CharFilterFactory that also supports normalization - * + *

              * The default implementation of {@link #normalize(Reader)} delegates to * {@link #create(Reader)} * diff --git a/server/src/main/java/org/opensearch/index/analysis/NormalizingTokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/NormalizingTokenFilterFactory.java index be761aee0d36c..2ed621cdd22b1 100644 --- a/server/src/main/java/org/opensearch/index/analysis/NormalizingTokenFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/NormalizingTokenFilterFactory.java @@ -36,7 +36,7 @@ /** * A TokenFilterFactory that may be used for normalization - * + *

* The default implementation delegates {@link #normalize(TokenStream)} to * {@link #create(TokenStream)}. * diff --git a/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java b/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java index a65e1898bea0d..8719d127781e0 100644 --- a/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/PreBuiltAnalyzerProviderFactory.java @@ -102,7 +102,7 @@ public void close() throws IOException { /** * A special cache that closes the gap between PreBuiltAnalyzers and PreBuiltAnalyzerProviderFactory. - * + *

* This can be removed when all analyzers have been moved away from PreBuiltAnalyzers to * PreBuiltAnalyzerProviderFactory either in server or analysis-common. * diff --git a/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java index 701a9302fc164..fd29dc4b8992f 100644 --- a/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java @@ -178,11 +178,11 @@ public TokenStream create(TokenStream tokenStream) { filter.setTokenSeparator(tokenSeparator); filter.setFillerToken(fillerToken); if (outputUnigrams || (minShingleSize != maxShingleSize)) { - /** - * We disable the graph analysis on this token stream - * because it produces shingles of different size. - * Graph analysis on such token stream is useless and dangerous as it may create too many paths - * since shingles of different size are not aligned in terms of positions. + /* + We disable the graph analysis on this token stream + because it produces shingles of different sizes. + Graph analysis on such a token stream is useless and dangerous as it may create too many paths + since shingles of different sizes are not aligned in terms of positions. */ filter.addAttribute(DisableGraphAttribute.class); } diff --git a/server/src/main/java/org/opensearch/index/analysis/TokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/TokenFilterFactory.java index 1b9d781b177ce..6708db8571b2a 100644 --- a/server/src/main/java/org/opensearch/index/analysis/TokenFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/TokenFilterFactory.java @@ -51,7 +51,7 @@ public interface TokenFilterFactory { /** * Normalize a tokenStream for use in multi-term queries - * + *

              * The default implementation is a no-op */ default TokenStream normalize(TokenStream tokenStream) { @@ -86,7 +86,7 @@ default TokenFilterFactory getChainAwareTokenFilterFactory( /** * Return a version of this TokenFilterFactory appropriate for synonym parsing - * + *

* Filters that should not be applied to synonyms (for example, those that produce * multiple tokens) should throw an exception * diff --git a/server/src/main/java/org/opensearch/index/codec/CodecAliases.java b/server/src/main/java/org/opensearch/index/codec/CodecAliases.java new file mode 100644 index 0000000000000..066c092e86db8 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/CodecAliases.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec; + +import org.apache.lucene.codecs.Codec; +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Set; + +/** + * This {@link CodecAliases} interface provides aliases for a {@link Codec}. + * + * @opensearch.internal + */ +@ExperimentalApi +public interface CodecAliases { + + /** + * Retrieves a set of aliases for a codec. + * + * @return A non-null set of alias strings. If no aliases are available, an empty set should be returned. + */ + default Set<String> aliases() { + return Set.of(); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/CodecService.java b/server/src/main/java/org/opensearch/index/codec/CodecService.java index 54feb446fdb40..9b57fe64cbeab 100644 --- a/server/src/main/java/org/opensearch/index/codec/CodecService.java +++ b/server/src/main/java/org/opensearch/index/codec/CodecService.java @@ -39,15 +39,10 @@ import org.opensearch.common.Nullable; import org.opensearch.common.collect.MapBuilder; import org.opensearch.index.IndexSettings; -import org.opensearch.index.codec.customcodecs.Lucene95CustomCodec; -import org.opensearch.index.codec.customcodecs.ZstdCodec; -import org.opensearch.index.codec.customcodecs.ZstdNoDictCodec; import org.opensearch.index.mapper.MapperService; import java.util.Map; -import static org.opensearch.index.engine.EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING; - /** * Since Lucene 4.0 low level index segments are read and written through a * codec layer that allows the use of use-case specific file formats & @@ -68,27 +63,20 @@ public class CodecService { * the raw unfiltered lucene default.
useful for testing */ public static final String LUCENE_DEFAULT_CODEC = "lucene_default"; - public static final String ZSTD_CODEC = "zstd"; - public static final String ZSTD_NO_DICT_CODEC = "zstd_no_dict"; public CodecService(@Nullable MapperService mapperService, IndexSettings indexSettings, Logger logger) { final MapBuilder codecs = MapBuilder.newMapBuilder(); assert null != indexSettings; - int compressionLevel = indexSettings.getValue(INDEX_CODEC_COMPRESSION_LEVEL_SETTING); if (mapperService == null) { codecs.put(DEFAULT_CODEC, new Lucene95Codec()); codecs.put(LZ4, new Lucene95Codec()); codecs.put(BEST_COMPRESSION_CODEC, new Lucene95Codec(Mode.BEST_COMPRESSION)); codecs.put(ZLIB, new Lucene95Codec(Mode.BEST_COMPRESSION)); - codecs.put(ZSTD_CODEC, new ZstdCodec(compressionLevel)); - codecs.put(ZSTD_NO_DICT_CODEC, new ZstdNoDictCodec(compressionLevel)); } else { codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); codecs.put(LZ4, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); codecs.put(BEST_COMPRESSION_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); codecs.put(ZLIB, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); - codecs.put(ZSTD_CODEC, new ZstdCodec(mapperService, logger, compressionLevel)); - codecs.put(ZSTD_NO_DICT_CODEC, new ZstdNoDictCodec(mapperService, logger, compressionLevel)); } codecs.put(LUCENE_DEFAULT_CODEC, Codec.getDefault()); for (String codec : Codec.availableCodecs()) { @@ -103,17 +91,10 @@ public CodecService(@Nullable MapperService mapperService, Logger logger) { if (mapperService == null) { codecs.put(DEFAULT_CODEC, new Lucene95Codec()); codecs.put(BEST_COMPRESSION_CODEC, new Lucene95Codec(Mode.BEST_COMPRESSION)); - codecs.put(ZSTD_CODEC, new ZstdCodec()); - codecs.put(ZSTD_NO_DICT_CODEC, new ZstdNoDictCodec()); } else { IndexSettings indexSettings = mapperService.getIndexSettings(); - int compressionLevel = indexSettings == null - ? Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL - : indexSettings.getValue(INDEX_CODEC_COMPRESSION_LEVEL_SETTING); codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); codecs.put(BEST_COMPRESSION_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); - codecs.put(ZSTD_CODEC, new ZstdCodec(mapperService, logger, compressionLevel)); - codecs.put(ZSTD_NO_DICT_CODEC, new ZstdNoDictCodec(mapperService, logger, compressionLevel)); } codecs.put(LUCENE_DEFAULT_CODEC, Codec.getDefault()); for (String codec : Codec.availableCodecs()) { diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java deleted file mode 100644 index 8aa422a47a073..0000000000000 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.logging.log4j.Logger; -import org.apache.lucene.codecs.FilterCodec; -import org.apache.lucene.codecs.StoredFieldsFormat; -import org.apache.lucene.codecs.lucene95.Lucene95Codec; -import org.opensearch.index.codec.PerFieldMappingPostingFormatCodec; -import org.opensearch.index.mapper.MapperService; - -/** - * - * Extends {@link FilterCodec} to reuse the functionality of Lucene Codec. - * Supports two modes zstd and zstd_no_dict. - * - * @opensearch.internal - */ -public abstract class Lucene95CustomCodec extends FilterCodec { - public static final int DEFAULT_COMPRESSION_LEVEL = 3; - - /** Each mode represents a compression algorithm. */ - public enum Mode { - ZSTD, - ZSTD_NO_DICT - } - - private final StoredFieldsFormat storedFieldsFormat; - - /** - * Creates a new compression codec with the default compression level. - * - * @param mode The compression codec (ZSTD or ZSTDNODICT). - */ - public Lucene95CustomCodec(Mode mode) { - this(mode, DEFAULT_COMPRESSION_LEVEL); - } - - /** - * Creates a new compression codec with the given compression level. We use - * lowercase letters when registering the codec so that we remain consistent with - * the other compression codecs: default, lucene_default, and best_compression. - * - * @param mode The compression codec (ZSTD or ZSTDNODICT). - * @param compressionLevel The compression level. - */ - public Lucene95CustomCodec(Mode mode, int compressionLevel) { - super("Lucene95CustomCodec", new Lucene95Codec()); - this.storedFieldsFormat = new Lucene95CustomStoredFieldsFormat(mode, compressionLevel); - } - - public Lucene95CustomCodec(Mode mode, int compressionLevel, MapperService mapperService, Logger logger) { - super("Lucene95CustomCodec", new PerFieldMappingPostingFormatCodec(Lucene95Codec.Mode.BEST_SPEED, mapperService, logger)); - this.storedFieldsFormat = new Lucene95CustomStoredFieldsFormat(mode, compressionLevel); - } - - @Override - public StoredFieldsFormat storedFieldsFormat() { - return storedFieldsFormat; - } - - @Override - public String toString() { - return getClass().getSimpleName(); - } -} diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java deleted file mode 100644 index 2816e2907a5f6..0000000000000 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormat.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.lucene.codecs.StoredFieldsFormat; -import org.apache.lucene.codecs.StoredFieldsReader; -import org.apache.lucene.codecs.StoredFieldsWriter; -import org.apache.lucene.codecs.compressing.CompressionMode; -import org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingStoredFieldsFormat; -import org.apache.lucene.index.FieldInfos; -import org.apache.lucene.index.SegmentInfo; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.IOContext; - -import java.io.IOException; -import java.util.Objects; - -/** Stored field format used by pluggable codec */ -public class Lucene95CustomStoredFieldsFormat extends StoredFieldsFormat { - - /** A key that we use to map to a mode */ - public static final String MODE_KEY = Lucene95CustomStoredFieldsFormat.class.getSimpleName() + ".mode"; - - private static final int ZSTD_BLOCK_LENGTH = 10 * 48 * 1024; - private static final int ZSTD_MAX_DOCS_PER_BLOCK = 4096; - private static final int ZSTD_BLOCK_SHIFT = 10; - - private final CompressionMode zstdCompressionMode; - private final CompressionMode zstdNoDictCompressionMode; - - private final Lucene95CustomCodec.Mode mode; - private final int compressionLevel; - - /** default constructor */ - public Lucene95CustomStoredFieldsFormat() { - this(Lucene95CustomCodec.Mode.ZSTD, Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL); - } - - /** - * Creates a new instance. - * - * @param mode The mode represents ZSTD or ZSTDNODICT - */ - public Lucene95CustomStoredFieldsFormat(Lucene95CustomCodec.Mode mode) { - this(mode, Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL); - } - - /** - * Creates a new instance with the specified mode and compression level. - * - * @param mode The mode represents ZSTD or ZSTDNODICT - * @param compressionLevel The compression level for the mode. - */ - public Lucene95CustomStoredFieldsFormat(Lucene95CustomCodec.Mode mode, int compressionLevel) { - this.mode = Objects.requireNonNull(mode); - this.compressionLevel = compressionLevel; - zstdCompressionMode = new ZstdCompressionMode(compressionLevel); - zstdNoDictCompressionMode = new ZstdNoDictCompressionMode(compressionLevel); - } - - /** - * Returns a {@link StoredFieldsReader} to load stored fields. - * @param directory The index directory. - * @param si The SegmentInfo that stores segment information. - * @param fn The fieldInfos. - * @param context The IOContext that holds additional details on the merge/search context. - */ - @Override - public StoredFieldsReader fieldsReader(Directory directory, SegmentInfo si, FieldInfos fn, IOContext context) throws IOException { - String value = si.getAttribute(MODE_KEY); - if (value == null) { - throw new IllegalStateException("missing value for " + MODE_KEY + " for segment: " + si.name); - } - Lucene95CustomCodec.Mode mode = Lucene95CustomCodec.Mode.valueOf(value); - return impl(mode).fieldsReader(directory, si, fn, context); - } - - /** - * Returns a {@link StoredFieldsReader} to write stored fields. - * @param directory The index directory. - * @param si The SegmentInfo that stores segment information. - * @param context The IOContext that holds additional details on the merge/search context. 
- */ - @Override - public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOContext context) throws IOException { - String previous = si.putAttribute(MODE_KEY, mode.name()); - if (previous != null && previous.equals(mode.name()) == false) { - throw new IllegalStateException( - "found existing value for " + MODE_KEY + " for segment: " + si.name + " old = " + previous + ", new = " + mode.name() - ); - } - return impl(mode).fieldsWriter(directory, si, context); - } - - StoredFieldsFormat impl(Lucene95CustomCodec.Mode mode) { - switch (mode) { - case ZSTD: - return new Lucene90CompressingStoredFieldsFormat( - "CustomStoredFieldsZstd", - zstdCompressionMode, - ZSTD_BLOCK_LENGTH, - ZSTD_MAX_DOCS_PER_BLOCK, - ZSTD_BLOCK_SHIFT - ); - case ZSTD_NO_DICT: - return new Lucene90CompressingStoredFieldsFormat( - "CustomStoredFieldsZstdNoDict", - zstdNoDictCompressionMode, - ZSTD_BLOCK_LENGTH, - ZSTD_MAX_DOCS_PER_BLOCK, - ZSTD_BLOCK_SHIFT - ); - default: - throw new AssertionError(); - } - } - - Lucene95CustomCodec.Mode getMode() { - return mode; - } - - public int getCompressionLevel() { - return compressionLevel; - } -} diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java deleted file mode 100644 index 042f7eaa29e53..0000000000000 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.logging.log4j.Logger; -import org.opensearch.common.settings.Setting; -import org.opensearch.index.codec.CodecSettings; -import org.opensearch.index.engine.EngineConfig; -import org.opensearch.index.mapper.MapperService; - -/** - * ZstdCodec provides ZSTD compressor using the zstd-jni library. - */ -public class ZstdCodec extends Lucene95CustomCodec implements CodecSettings { - - /** - * Creates a new ZstdCodec instance with the default compression level. - */ - public ZstdCodec() { - this(DEFAULT_COMPRESSION_LEVEL); - } - - /** - * Creates a new ZstdCodec instance. - * - * @param compressionLevel The compression level. - */ - public ZstdCodec(int compressionLevel) { - super(Mode.ZSTD, compressionLevel); - } - - public ZstdCodec(MapperService mapperService, Logger logger, int compressionLevel) { - super(Mode.ZSTD, compressionLevel, mapperService, logger); - } - - /** The name for this codec. */ - @Override - public String toString() { - return getClass().getSimpleName(); - } - - @Override - public boolean supports(Setting setting) { - return setting.equals(EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING); - } -} diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java deleted file mode 100644 index 05ff725933e1a..0000000000000 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCompressionMode.java +++ /dev/null @@ -1,210 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec.customcodecs; - -import com.github.luben.zstd.Zstd; -import com.github.luben.zstd.ZstdCompressCtx; -import com.github.luben.zstd.ZstdDecompressCtx; -import com.github.luben.zstd.ZstdDictCompress; -import com.github.luben.zstd.ZstdDictDecompress; - -import org.apache.lucene.codecs.compressing.CompressionMode; -import org.apache.lucene.codecs.compressing.Compressor; -import org.apache.lucene.codecs.compressing.Decompressor; -import org.apache.lucene.store.ByteBuffersDataInput; -import org.apache.lucene.store.DataInput; -import org.apache.lucene.store.DataOutput; -import org.apache.lucene.util.ArrayUtil; -import org.apache.lucene.util.BytesRef; - -import java.io.IOException; - -/** Zstandard Compression Mode */ -public class ZstdCompressionMode extends CompressionMode { - - private static final int NUM_SUB_BLOCKS = 10; - private static final int DICT_SIZE_FACTOR = 6; - private static final int DEFAULT_COMPRESSION_LEVEL = 6; - - private final int compressionLevel; - - /** default constructor */ - protected ZstdCompressionMode() { - this.compressionLevel = DEFAULT_COMPRESSION_LEVEL; - } - - /** - * Creates a new instance. - * - * @param compressionLevel The compression level to use. - */ - protected ZstdCompressionMode(int compressionLevel) { - this.compressionLevel = compressionLevel; - } - - /** Creates a new compressor instance.*/ - @Override - public Compressor newCompressor() { - return new ZstdCompressor(compressionLevel); - } - - /** Creates a new decompressor instance. */ - @Override - public Decompressor newDecompressor() { - return new ZstdDecompressor(); - } - - /** zstandard compressor */ - private static final class ZstdCompressor extends Compressor { - - private final int compressionLevel; - private byte[] compressedBuffer; - - /** compressor with a given compresion level */ - public ZstdCompressor(int compressionLevel) { - this.compressionLevel = compressionLevel; - compressedBuffer = BytesRef.EMPTY_BYTES; - } - - /*resuable compress function*/ - private void doCompress(byte[] bytes, int offset, int length, ZstdCompressCtx cctx, DataOutput out) throws IOException { - if (length == 0) { - out.writeVInt(0); - return; - } - final int maxCompressedLength = (int) Zstd.compressBound(length); - compressedBuffer = ArrayUtil.growNoCopy(compressedBuffer, maxCompressedLength); - - int compressedSize = cctx.compressByteArray(compressedBuffer, 0, compressedBuffer.length, bytes, offset, length); - - out.writeVInt(compressedSize); - out.writeBytes(compressedBuffer, compressedSize); - } - - private void compress(byte[] bytes, int offset, int length, DataOutput out) throws IOException { - assert offset >= 0 : "offset value must be greater than 0"; - - final int dictLength = length / (NUM_SUB_BLOCKS * DICT_SIZE_FACTOR); - final int blockLength = (length - dictLength + NUM_SUB_BLOCKS - 1) / NUM_SUB_BLOCKS; - out.writeVInt(dictLength); - out.writeVInt(blockLength); - - final int end = offset + length; - assert end >= 0 : "buffer read size must be greater than 0"; - - try (ZstdCompressCtx cctx = new ZstdCompressCtx()) { - cctx.setLevel(compressionLevel); - - // dictionary compression first - doCompress(bytes, offset, dictLength, cctx, out); - try (ZstdDictCompress dictCompress = new ZstdDictCompress(bytes, offset, dictLength, compressionLevel)) { - cctx.loadDict(dictCompress); - - for (int start = offset + dictLength; start < end; start += blockLength) { - int l = Math.min(blockLength, end - start); - doCompress(bytes, start, l, cctx, out); - } - } - } - } - 
- @Override - public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException { - final int length = (int) buffersInput.size(); - byte[] bytes = new byte[length]; - buffersInput.readBytes(bytes, 0, length); - compress(bytes, 0, length, out); - } - - @Override - public void close() throws IOException {} - } - - /** zstandard decompressor */ - private static final class ZstdDecompressor extends Decompressor { - - private byte[] compressedBuffer; - - /** default decompressor */ - public ZstdDecompressor() { - compressedBuffer = BytesRef.EMPTY_BYTES; - } - - /*resuable decompress function*/ - private void doDecompress(DataInput in, ZstdDecompressCtx dctx, BytesRef bytes, int decompressedLen) throws IOException { - final int compressedLength = in.readVInt(); - if (compressedLength == 0) { - return; - } - - compressedBuffer = ArrayUtil.growNoCopy(compressedBuffer, compressedLength); - in.readBytes(compressedBuffer, 0, compressedLength); - - bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + decompressedLen); - int uncompressed = dctx.decompressByteArray(bytes.bytes, bytes.length, decompressedLen, compressedBuffer, 0, compressedLength); - - if (decompressedLen != uncompressed) { - throw new IllegalStateException(decompressedLen + " " + uncompressed); - } - bytes.length += uncompressed; - } - - @Override - public void decompress(DataInput in, int originalLength, int offset, int length, BytesRef bytes) throws IOException { - assert offset + length <= originalLength : "buffer read size must be within limit"; - - if (length == 0) { - bytes.length = 0; - return; - } - final int dictLength = in.readVInt(); - final int blockLength = in.readVInt(); - bytes.bytes = ArrayUtil.growNoCopy(bytes.bytes, dictLength); - bytes.offset = bytes.length = 0; - - try (ZstdDecompressCtx dctx = new ZstdDecompressCtx()) { - - // decompress dictionary first - doDecompress(in, dctx, bytes, dictLength); - try (ZstdDictDecompress dictDecompress = new ZstdDictDecompress(bytes.bytes, 0, dictLength)) { - dctx.loadDict(dictDecompress); - - int offsetInBlock = dictLength; - int offsetInBytesRef = offset; - - // Skip unneeded blocks - while (offsetInBlock + blockLength < offset) { - final int compressedLength = in.readVInt(); - in.skipBytes(compressedLength); - offsetInBlock += blockLength; - offsetInBytesRef -= blockLength; - } - - // Read blocks that intersect with the interval we need - while (offsetInBlock < offset + length) { - bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + blockLength); - int l = Math.min(blockLength, originalLength - offsetInBlock); - doDecompress(in, dctx, bytes, l); - offsetInBlock += blockLength; - } - - bytes.offset = offsetInBytesRef; - bytes.length = length; - - assert bytes.isValid() : "decompression output is corrupted"; - } - } - } - - @Override - public Decompressor clone() { - return new ZstdDecompressor(); - } - } -} diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java deleted file mode 100644 index a7e8e0e42ee68..0000000000000 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.logging.log4j.Logger; -import org.opensearch.common.settings.Setting; -import org.opensearch.index.codec.CodecSettings; -import org.opensearch.index.engine.EngineConfig; -import org.opensearch.index.mapper.MapperService; - -/** - * ZstdNoDictCodec provides ZSTD compressor without a dictionary support. - */ -public class ZstdNoDictCodec extends Lucene95CustomCodec implements CodecSettings { - - /** - * Creates a new ZstdNoDictCodec instance with the default compression level. - */ - public ZstdNoDictCodec() { - this(DEFAULT_COMPRESSION_LEVEL); - } - - /** - * Creates a new ZstdNoDictCodec instance. - * - * @param compressionLevel The compression level. - */ - public ZstdNoDictCodec(int compressionLevel) { - super(Mode.ZSTD_NO_DICT, compressionLevel); - } - - public ZstdNoDictCodec(MapperService mapperService, Logger logger, int compressionLevel) { - super(Mode.ZSTD_NO_DICT, compressionLevel, mapperService, logger); - } - - /** The name for this codec. */ - @Override - public String toString() { - return getClass().getSimpleName(); - } - - @Override - public boolean supports(Setting setting) { - return setting.equals(EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING); - } -} diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java deleted file mode 100644 index af4e92b78ed0f..0000000000000 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressionMode.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.codec.customcodecs; - -import com.github.luben.zstd.Zstd; - -import org.apache.lucene.codecs.compressing.CompressionMode; -import org.apache.lucene.codecs.compressing.Compressor; -import org.apache.lucene.codecs.compressing.Decompressor; -import org.apache.lucene.store.ByteBuffersDataInput; -import org.apache.lucene.store.DataInput; -import org.apache.lucene.store.DataOutput; -import org.apache.lucene.util.ArrayUtil; -import org.apache.lucene.util.BytesRef; - -import java.io.IOException; - -/** ZSTD Compression Mode (without a dictionary support). */ -public class ZstdNoDictCompressionMode extends CompressionMode { - - private static final int NUM_SUB_BLOCKS = 10; - private static final int DEFAULT_COMPRESSION_LEVEL = 6; - - private final int compressionLevel; - - /** default constructor */ - protected ZstdNoDictCompressionMode() { - this.compressionLevel = DEFAULT_COMPRESSION_LEVEL; - } - - /** - * Creates a new instance with the given compression level. - * - * @param compressionLevel The compression level. - */ - protected ZstdNoDictCompressionMode(int compressionLevel) { - this.compressionLevel = compressionLevel; - } - - /** Creates a new compressor instance.*/ - @Override - public Compressor newCompressor() { - return new ZstdCompressor(compressionLevel); - } - - /** Creates a new decompressor instance. 
*/ - @Override - public Decompressor newDecompressor() { - return new ZstdDecompressor(); - } - - /** zstandard compressor */ - private static final class ZstdCompressor extends Compressor { - - private final int compressionLevel; - private byte[] compressedBuffer; - - /** compressor with a given compresion level */ - public ZstdCompressor(int compressionLevel) { - this.compressionLevel = compressionLevel; - compressedBuffer = BytesRef.EMPTY_BYTES; - } - - private void compress(byte[] bytes, int offset, int length, DataOutput out) throws IOException { - assert offset >= 0 : "offset value must be greater than 0"; - - int blockLength = (length + NUM_SUB_BLOCKS - 1) / NUM_SUB_BLOCKS; - out.writeVInt(blockLength); - - final int end = offset + length; - assert end >= 0 : "buffer read size must be greater than 0"; - - for (int start = offset; start < end; start += blockLength) { - int l = Math.min(blockLength, end - start); - - if (l == 0) { - out.writeVInt(0); - return; - } - - final int maxCompressedLength = (int) Zstd.compressBound(l); - compressedBuffer = ArrayUtil.growNoCopy(compressedBuffer, maxCompressedLength); - - int compressedSize = (int) Zstd.compressByteArray( - compressedBuffer, - 0, - compressedBuffer.length, - bytes, - start, - l, - compressionLevel - ); - - out.writeVInt(compressedSize); - out.writeBytes(compressedBuffer, compressedSize); - } - } - - @Override - public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException { - final int length = (int) buffersInput.size(); - byte[] bytes = new byte[length]; - buffersInput.readBytes(bytes, 0, length); - compress(bytes, 0, length, out); - } - - @Override - public void close() throws IOException {} - } - - /** zstandard decompressor */ - private static final class ZstdDecompressor extends Decompressor { - - private byte[] compressed; - - /** default decompressor */ - public ZstdDecompressor() { - compressed = BytesRef.EMPTY_BYTES; - } - - @Override - public void decompress(DataInput in, int originalLength, int offset, int length, BytesRef bytes) throws IOException { - assert offset + length <= originalLength : "buffer read size must be within limit"; - - if (length == 0) { - bytes.length = 0; - return; - } - - final int blockLength = in.readVInt(); - bytes.offset = bytes.length = 0; - int offsetInBlock = 0; - int offsetInBytesRef = offset; - - // Skip unneeded blocks - while (offsetInBlock + blockLength < offset) { - final int compressedLength = in.readVInt(); - in.skipBytes(compressedLength); - offsetInBlock += blockLength; - offsetInBytesRef -= blockLength; - } - - // Read blocks that intersect with the interval we need - while (offsetInBlock < offset + length) { - bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + blockLength); - final int compressedLength = in.readVInt(); - if (compressedLength == 0) { - return; - } - compressed = ArrayUtil.growNoCopy(compressed, compressedLength); - in.readBytes(compressed, 0, compressedLength); - - int l = Math.min(blockLength, originalLength - offsetInBlock); - bytes.bytes = ArrayUtil.grow(bytes.bytes, bytes.length + l); - - byte[] output = new byte[l]; - - final int uncompressed = (int) Zstd.decompressByteArray(output, 0, l, compressed, 0, compressedLength); - System.arraycopy(output, 0, bytes.bytes, bytes.length, uncompressed); - - bytes.length += uncompressed; - offsetInBlock += blockLength; - } - - bytes.offset = offsetInBytesRef; - bytes.length = length; - - assert bytes.isValid() : "decompression output is corrupted."; - } - - @Override - public 
Decompressor clone() { - return new ZstdDecompressor(); - } - } -} diff --git a/server/src/main/java/org/opensearch/index/engine/Engine.java b/server/src/main/java/org/opensearch/index/engine/Engine.java index 08db92af23d01..29865da143338 100644 --- a/server/src/main/java/org/opensearch/index/engine/Engine.java +++ b/server/src/main/java/org/opensearch/index/engine/Engine.java @@ -38,7 +38,10 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.SegmentReader; @@ -135,12 +138,15 @@ public abstract class Engine implements Closeable { public static final String MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID = "max_unsafe_auto_id_timestamp"; public static final String SEARCH_SOURCE = "search"; // TODO: Make source of search enum? public static final String CAN_MATCH_SEARCH_SOURCE = "can_match"; + public static final String FORCE_MERGE = "force merge"; + public static final String MERGE_FAILED = "merge failed"; protected final ShardId shardId; protected final Logger logger; protected final EngineConfig engineConfig; protected final Store store; protected final AtomicBoolean isClosed = new AtomicBoolean(false); + private final CounterMetric totalUnreferencedFileCleanUpsPerformed = new CounterMetric(); private final CountDownLatch closedLatch = new CountDownLatch(1); protected final EventListener eventListener; protected final ReentrantLock failEngineLock = new ReentrantLock(); @@ -263,6 +269,13 @@ protected final DocsStats docsStats(IndexReader indexReader) { return new DocsStats(numDocs, numDeletedDocs, sizeInBytes); } + /** + * Returns the unreferenced file cleanup count for this engine. + */ + public long unreferencedFileCleanUpsPerformed() { + return totalUnreferencedFileCleanUpsPerformed.count(); + } + /** * Performs the pre-closing checks on the {@link Engine}. * @@ -983,6 +996,10 @@ protected void fillSegmentStats(SegmentReader segmentReader, boolean includeSegm } } + boolean shouldCleanupUnreferencedFiles() { + return engineConfig.getIndexSettings().shouldCleanupUnreferencedFiles(); + } + private Map<String, Long> getSegmentFileSizes(SegmentReader segmentReader) { Directory directory = null; SegmentCommitInfo segmentCommitInfo = segmentReader.getSegmentInfo(); @@ -1239,7 +1256,7 @@ public final void flush() throws EngineException { /** * Rolls the translog generation and cleans unneeded generations. */ - public abstract void rollTranslogGeneration() throws EngineException; + public abstract void rollTranslogGeneration() throws EngineException, IOException; /** * Triggers a forced merge on this engine @@ -1343,6 +1360,14 @@ public void failEngine(String reason, @Nullable Exception failure) { ); } } + + // If the cleanup-unreferenced-files flag is enabled and a force merge or regular merge failed due to an IOException, + // clean up, on a best-effort basis, all unreferenced files created during the failed merge and reset the + // shard state back to the last Lucene commit.
+ if (shouldCleanupUnreferencedFiles() && isMergeFailureDueToIOException(failure, reason)) { + cleanUpUnreferencedFiles(); + } + eventListener.onFailedEngine(reason, failure); } } catch (Exception inner) { @@ -1361,6 +1386,34 @@ public void failEngine(String reason, @Nullable Exception failure) { } } + /** + * Clean up all unreferenced files generated during a failed segment merge. This resets the shard state to the last Lucene + * commit. + */ + private void cleanUpUnreferencedFiles() { + try ( + IndexWriter writer = new IndexWriter( + store.directory(), + new IndexWriterConfig(Lucene.STANDARD_ANALYZER).setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) + .setCommitOnClose(false) + .setMergePolicy(NoMergePolicy.INSTANCE) + .setOpenMode(IndexWriterConfig.OpenMode.APPEND) + ) + ) { + // Do nothing except increase the metric count; closing this writer will kick off IndexFileDeleter, which will + // remove all unreferenced files + totalUnreferencedFileCleanUpsPerformed.inc(); + } catch (Exception ex) { + logger.error("Error while deleting unreferenced files ", ex); + } + } + + /** Check whether the merge failure happened due to IOException. */ + private boolean isMergeFailureDueToIOException(Exception failure, String reason) { + return (reason.equals(FORCE_MERGE) || reason.equals(MERGE_FAILED)) + && ExceptionsHelper.unwrap(failure, IOException.class) instanceof IOException; + } + /** Check whether the engine should be failed */ protected boolean maybeFailEngine(String source, Exception e) { if (Lucene.isCorruptionException(e)) { diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java index 3351931a6b068..00f8ff6d3cd40 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java @@ -49,6 +49,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.codec.CodecAliases; import org.opensearch.index.codec.CodecService; import org.opensearch.index.codec.CodecSettings; import org.opensearch.index.mapper.ParsedDocument; @@ -133,18 +134,26 @@ public Supplier<RetentionLeases> retentionLeasesSupplier() { case "lz4": case "best_compression": case "zlib": - case "zstd": - case "zstd_no_dict": case "lucene_default": return s; default: - if (Codec.availableCodecs().contains(s) == false) { // we don't error message the not officially supported ones - throw new IllegalArgumentException( - "unknown value for [index.codec] must be one of [default, lz4, best_compression, zlib, zstd, zstd_no_dict] but was: " - + s - ); + if (Codec.availableCodecs().contains(s)) { + return s; } - return s; + + for (String codecName : Codec.availableCodecs()) { + Codec codec = Codec.forName(codecName); + if (codec instanceof CodecAliases) { + CodecAliases codecWithAlias = (CodecAliases) codec; + if (codecWithAlias.aliases().contains(s)) { + return s; + } + } + } + + throw new IllegalArgumentException( + "unknown value for [index.codec] must be one of [default, lz4, best_compression, zlib] but was: " + s + ); } }, Property.IndexScope, Property.NodeScope); @@ -181,9 +190,6 @@ public void validate(String key, Object value, Object dependency) { private static void doValidateCodecSettings(final String codec) { switch (codec) { - case "zstd": - case "zstd_no_dict": - return; case "best_compression": case "zlib": case "lucene_default": @@ -198,6 +204,18 @@ private
static void doValidateCodecSettings(final String codec) { return; } } + for (String codecName : Codec.availableCodecs()) { + Codec availableCodec = Codec.forName(codecName); + if (availableCodec instanceof CodecAliases) { + CodecAliases availableCodecWithAlias = (CodecAliases) availableCodec; + if (availableCodecWithAlias.aliases().contains(codec)) { + if (availableCodec instanceof CodecSettings + && ((CodecSettings) availableCodec).supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING)) { + return; + } + } + } + } } throw new IllegalArgumentException("Compression level cannot be set for the " + codec + " codec."); } @@ -238,6 +256,7 @@ private EngineConfig(Builder builder) { this.codecService = builder.codecService; this.eventListener = builder.eventListener; codecName = builder.indexSettings.getValue(INDEX_CODEC_SETTING); + // We need to make the indexing buffer for this shard at least as large // as the amount of memory that is available for all engines on the // local node so that decisions to flush segments to disk are made by diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index 50d0f62b8a79d..9e6136b27db24 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -402,7 +402,7 @@ public CompletionStats completionStats(String... fieldNamePatterns) { * The main purpose for this is that if we have external refreshes happening we don't issue extra * refreshes to clear version map memory etc. This can cause excessive segment creation if heavy indexing * is happening and the refresh interval is low (i.e. 1 sec) - * + *

* This also prevents segment starvation where an internal reader holds on to old segments literally forever * since no indexing is happening and refreshes are only happening to the external reader manager, while with * this specialized implementation an external refresh will immediately be reflected on the internal reader @@ -1953,6 +1953,13 @@ public void flush(boolean force, boolean waitIfOngoing) throws EngineException { try { translogManager.rollTranslogGeneration(); logger.trace("starting commit for flush; commitTranslog=true"); + // With segment replication we need to hold the latest commit before a new one is created, and ensure it is released + // only after the active reader is updated. This ensures that a flush does not wipe out a required commit point file + // while we are in refresh listeners. + final GatedCloseable<IndexCommit> latestCommit = engineConfig.getIndexSettings().isSegRepEnabled() + ? acquireLastIndexCommit(false) + : null; commitIndexWriter(indexWriter, translogManager.getTranslog()); logger.trace("finished commit for flush"); @@ -1966,6 +1973,11 @@ public void flush(boolean force, boolean waitIfOngoing) throws EngineException { // we need to refresh in order to clear older version values refresh("version_table_flush", SearcherScope.INTERNAL, true); + + if (latestCommit != null) { + latestCommit.close(); + } + translogManager.trimUnreferencedReaders(); } catch (AlreadyClosedException e) { failOnTragicEvent(e); @@ -2177,7 +2189,7 @@ public void forceMerge( throw ex; } catch (Exception e) { try { - maybeFailEngine("force merge", e); + maybeFailEngine(FORCE_MERGE, e); } catch (Exception inner) { e.addSuppressed(inner); } @@ -2285,41 +2297,32 @@ protected SegmentInfos getLastCommittedSegmentInfos() { @Override protected SegmentInfos getLatestSegmentInfos() { - OpenSearchDirectoryReader reader = null; - try { - reader = internalReaderManager.acquire(); - return ((StandardDirectoryReader) reader.getDelegate()).getSegmentInfos(); + try (final GatedCloseable<SegmentInfos> snapshot = getSegmentInfosSnapshot()) { + return snapshot.get(); } catch (IOException e) { throw new EngineException(shardId, e.getMessage(), e); - } finally { - try { - internalReaderManager.release(reader); - } catch (IOException e) { - throw new EngineException(shardId, e.getMessage(), e); - } } } /** - * Fetch the latest {@link SegmentInfos} object via {@link #getLatestSegmentInfos()} - * but also increment the ref-count to ensure that these segment files are retained - * until the reference is closed. On close, the ref-count is decremented. + * Fetch the latest {@link SegmentInfos} from the current ReaderManager's active DirectoryReader. + * This method will hold the reader reference until the returned {@link GatedCloseable} is closed.
*/ @Override public GatedCloseable<SegmentInfos> getSegmentInfosSnapshot() { - final SegmentInfos segmentInfos = getLatestSegmentInfos(); + final OpenSearchDirectoryReader reader; try { - indexWriter.incRefDeleter(segmentInfos); + reader = internalReaderManager.acquire(); + return new GatedCloseable<>(((StandardDirectoryReader) reader.getDelegate()).getSegmentInfos(), () -> { + try { + internalReaderManager.release(reader); + } catch (AlreadyClosedException e) { + logger.warn("Engine is already closed.", e); + } + }); } catch (IOException e) { throw new EngineException(shardId, e.getMessage(), e); } - return new GatedCloseable<>(segmentInfos, () -> { - try { - indexWriter.decRefDeleter(segmentInfos); - } catch (AlreadyClosedException e) { - logger.warn("Engine is already closed.", e); - } - }); } @Override @@ -2636,7 +2639,7 @@ protected void doRun() throws Exception { * confidence that the call stack does not contain catch statements that would cause the error that might be thrown * here from being caught and never reaching the uncaught exception handler. */ - failEngine("merge failed", new MergePolicy.MergeException(exc)); + failEngine(MERGE_FAILED, new MergePolicy.MergeException(exc)); } }); } diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index 83621688cad53..5166384ac3f90 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -24,6 +24,7 @@ import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.translog.Translog; +import org.opensearch.index.translog.TranslogCorruptedException; import org.opensearch.index.translog.TranslogDeletionPolicy; import org.opensearch.index.translog.TranslogException; import org.opensearch.index.translog.TranslogManager; @@ -40,6 +41,8 @@ import java.util.Map; import java.util.Objects; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; import java.util.function.BiFunction; import java.util.stream.Stream; @@ -59,6 +62,7 @@ public class NRTReplicationEngine extends Engine implements LifecycleAware { private final CompletionStatsCache completionStatsCache; private final LocalCheckpointTracker localCheckpointTracker; private final WriteOnlyTranslogManager translogManager; + private final Lock flushLock = new ReentrantLock(); protected final ReplicaFileTracker replicaFileTracker; private volatile long lastReceivedPrimaryGen = SequenceNumbers.NO_OPS_PERFORMED; @@ -70,6 +74,7 @@ public NRTReplicationEngine(EngineConfig engineConfig) { store.incRef(); NRTReplicationReaderManager readerManager = null; WriteOnlyTranslogManager translogManagerRef = null; + boolean success = false; try { this.replicaFileTracker = new ReplicaFileTracker(store::deleteQuiet); this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); @@ -123,9 +128,17 @@ public void onAfterTranslogSync() { engineConfig.getPrimaryModeSupplier() ); this.translogManager = translogManagerRef; - } catch (IOException e) { - IOUtils.closeWhileHandlingException(store::decRef, readerManager, translogManagerRef); + success = true; + } catch (IOException | TranslogCorruptedException e) { throw new EngineCreationFailureException(shardId, "failed to create engine", e); + } finally { + if (success == false) { +
IOUtils.closeWhileHandlingException(readerManager, translogManagerRef); + if (isClosed.get() == false) { + // failure, we need to dec the store reference + store.decRef(); + } + } } } @@ -157,7 +170,7 @@ public synchronized void updateSegments(final SegmentInfos infos) throws IOExcep // a lower gen from a newly elected primary shard that is behind this shard's last commit gen. // In that case we still commit into the next local generation. if (incomingGeneration != this.lastReceivedPrimaryGen) { - commitSegmentInfos(); + flush(false, true); translogManager.getDeletionPolicy().setLocalCheckpointOfSafeCommit(maxSeqNo); translogManager.rollTranslogGeneration(); } @@ -168,7 +181,7 @@ public synchronized void updateSegments(final SegmentInfos infos) throws IOExcep /** * Persist the latest live SegmentInfos. - * + *
<p>
              * This method creates a commit point from the latest SegmentInfos. * * @throws IOException - When there is an IO error committing the SegmentInfos. @@ -185,7 +198,7 @@ private void commitSegmentInfos(SegmentInfos infos) throws IOException { translogManager.syncTranslog(); } - protected void commitSegmentInfos() throws IOException { + private void commitSegmentInfos() throws IOException { commitSegmentInfos(getLatestSegmentInfos()); } @@ -388,7 +401,28 @@ public boolean shouldPeriodicallyFlush() { } @Override - public void flush(boolean force, boolean waitIfOngoing) throws EngineException {} + public void flush(boolean force, boolean waitIfOngoing) throws EngineException { + ensureOpen(); + // readLock is held here to wait/block any concurrent close that acquires the writeLock. + try (final ReleasableLock lock = readLock.acquire()) { + ensureOpen(); + if (flushLock.tryLock() == false) { + if (waitIfOngoing == false) { + return; + } + flushLock.lock(); + } + // we are now locked. + try { + commitSegmentInfos(); + } catch (IOException e) { + maybeFailEngine("flush", e); + throw new FlushFailedEngineException(shardId, e); + } finally { + flushLock.unlock(); + } + } + } @Override public void trimUnreferencedTranslogFiles() throws EngineException { @@ -401,7 +435,7 @@ public boolean shouldRollTranslogGeneration() { } @Override - public void rollTranslogGeneration() throws EngineException { + public void rollTranslogGeneration() throws EngineException, IOException { translogManager.rollTranslogGeneration(); } @@ -417,6 +451,9 @@ public void forceMerge( @Override public GatedCloseable acquireLastIndexCommit(boolean flushFirst) throws EngineException { + if (flushFirst) { + flush(false, true); + } try { final IndexCommit indexCommit = Lucene.getIndexCommit(lastCommittedSegmentInfos, store.directory()); return new GatedCloseable<>(indexCommit, () -> {}); @@ -453,13 +490,29 @@ protected final void closeNoLock(String reason, CountDownLatch closedLatch) { latestSegmentInfos.counter = latestSegmentInfos.counter + SI_COUNTER_INCREMENT; latestSegmentInfos.changed(); } - commitSegmentInfos(latestSegmentInfos); - IOUtils.close(readerManager, translogManager, store::decRef); + try { + commitSegmentInfos(latestSegmentInfos); + } catch (IOException e) { + // mark the store corrupted unless we are closing as result of engine failure. + // in this case Engine#failShard will handle store corruption. 
+ if (failEngineLock.isHeldByCurrentThread() == false && store.isMarkedCorrupted() == false) { + try { + store.markStoreCorrupted(e); + } catch (IOException ex) { + logger.warn("Unable to mark store corrupted", ex); + } + } + } + IOUtils.close(readerManager, translogManager); } catch (Exception e) { - logger.warn("failed to close engine", e); + logger.error("failed to close engine", e); } finally { - logger.debug("engine closed [{}]", reason); - closedLatch.countDown(); + try { + store.decRef(); + logger.debug("engine closed [{}]", reason); + } finally { + closedLatch.countDown(); + } } } } diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java index c0ed9294353f5..ebe2bf6d0107a 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java @@ -522,7 +522,7 @@ public boolean shouldRollTranslogGeneration() { } @Override - public void rollTranslogGeneration() { + public void rollTranslogGeneration() throws IOException { translogManager.rollTranslogGeneration(); } diff --git a/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java b/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java index 2e8bd6409c2f6..19454967f9ee3 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java +++ b/server/src/main/java/org/opensearch/index/engine/ReplicaFileTracker.java @@ -22,7 +22,7 @@ /** * This class is heavily influenced by Lucene's ReplicaFileDeleter class used to keep track of * segment files that should be preserved on replicas between replication events. - * + *
<p>
              * https://github.com/apache/lucene/blob/main/lucene/replicator/src/java/org/apache/lucene/replicator/nrt/ReplicaFileDeleter.java * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java b/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java index d18579e662710..f4fd2490c7abe 100644 --- a/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java +++ b/server/src/main/java/org/opensearch/index/engine/SegmentsStats.java @@ -39,6 +39,7 @@ import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.remote.RemoteSegmentStats; import java.io.IOException; @@ -62,6 +63,11 @@ public class SegmentsStats implements Writeable, ToXContentFragment { private final RemoteSegmentStats remoteSegmentStats; private static final ByteSizeValue ZERO_BYTE_SIZE_VALUE = new ByteSizeValue(0L); + /** + * Segment replication statistics. + */ + private final ReplicationStats replicationStats; + /* * A map to provide a best-effort approach describing Lucene index files. * @@ -93,6 +99,7 @@ public class SegmentsStats implements Writeable, ToXContentFragment { public SegmentsStats() { fileSizes = new HashMap<>(); remoteSegmentStats = new RemoteSegmentStats(); + replicationStats = new ReplicationStats(); } public SegmentsStats(StreamInput in) throws IOException { @@ -115,8 +122,10 @@ public SegmentsStats(StreamInput in) throws IOException { fileSizes = in.readMap(StreamInput::readString, StreamInput::readLong); if (in.getVersion().onOrAfter(Version.V_2_10_0)) { remoteSegmentStats = in.readOptionalWriteable(RemoteSegmentStats::new); + replicationStats = in.readOptionalWriteable(ReplicationStats::new); } else { remoteSegmentStats = new RemoteSegmentStats(); + replicationStats = new ReplicationStats(); } } @@ -144,6 +153,10 @@ public void addRemoteSegmentStats(RemoteSegmentStats remoteSegmentStats) { this.remoteSegmentStats.add(remoteSegmentStats); } + public void addReplicationStats(ReplicationStats replicationStats) { + this.replicationStats.add(replicationStats); + } + public void addFileSizes(final Map newFileSizes) { newFileSizes.forEach((k, v) -> this.fileSizes.merge(k, v, (a, b) -> { assert a != null; @@ -163,6 +176,7 @@ public void add(SegmentsStats mergeStats) { addBitsetMemoryInBytes(mergeStats.bitsetMemoryInBytes); addFileSizes(mergeStats.fileSizes); addRemoteSegmentStats(mergeStats.remoteSegmentStats); + addReplicationStats(mergeStats.replicationStats); } /** @@ -215,6 +229,10 @@ public RemoteSegmentStats getRemoteSegmentStats() { return remoteSegmentStats; } + public ReplicationStats getReplicationStats() { + return replicationStats; + } + /** * Returns the max timestamp that is used to de-optimize documents with auto-generated IDs in the engine. 
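The SegmentsStats constructor in the hunk above only reads the new replicationStats field when the sender is on or after Version.V_2_10_0, and falls back to an empty instance otherwise, so mixed-version clusters keep exchanging stats safely. A self-contained sketch of that version-gated wire pattern, using plain java.io streams rather than OpenSearch's StreamInput/StreamOutput; the class, field, and version constants below are made up for illustration:

import java.io.*;

// Version-gated wire compatibility: a new field is only serialized when the
// peer speaks a protocol version that knows about it; older peers get a default.
public final class VersionedStat {
    static final int WIRE_VERSION_2_09 = 209;
    static final int WIRE_VERSION_2_10 = 210;

    final long docCount;        // present in every version
    final long replicationLag;  // added in 2.10; defaults to 0 when absent

    VersionedStat(long docCount, long replicationLag) {
        this.docCount = docCount;
        this.replicationLag = replicationLag;
    }

    void writeTo(DataOutput out, int peerVersion) throws IOException {
        out.writeLong(docCount);
        if (peerVersion >= WIRE_VERSION_2_10) {
            out.writeLong(replicationLag); // older peers never see this field
        }
    }

    static VersionedStat readFrom(DataInput in, int peerVersion) throws IOException {
        long docCount = in.readLong();
        long lag = peerVersion >= WIRE_VERSION_2_10 ? in.readLong() : 0L; // old peer: default
        return new VersionedStat(docCount, lag);
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new VersionedStat(42, 7).writeTo(new DataOutputStream(bytes), WIRE_VERSION_2_09);
        VersionedStat read = VersionedStat.readFrom(
            new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())), WIRE_VERSION_2_09);
        System.out.println(read.docCount + " / " + read.replicationLag); // 42 / 0
    }
}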
* This is used to ensure we don't add duplicate documents when we assume an append only case based on auto-generated IDs @@ -239,6 +257,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, getBitsetMemory()); builder.field(Fields.MAX_UNSAFE_AUTO_ID_TIMESTAMP, maxUnsafeAutoIdTimestamp); remoteSegmentStats.toXContent(builder, params); + replicationStats.toXContent(builder, params); builder.startObject(Fields.FILE_SIZES); for (Map.Entry entry : fileSizes.entrySet()) { builder.startObject(entry.getKey()); @@ -307,6 +326,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(this.fileSizes, StreamOutput::writeString, StreamOutput::writeLong); if (out.getVersion().onOrAfter(Version.V_2_10_0)) { out.writeOptionalWriteable(remoteSegmentStats); + out.writeOptionalWriteable(replicationStats); } } diff --git a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java index 6aacb6c1cbedf..c1f69d1ef3638 100644 --- a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java +++ b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java @@ -52,7 +52,7 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; -import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.KnnCollector; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.opensearch.common.util.set.Sets; @@ -271,12 +271,12 @@ public ByteVectorValues getByteVectorValues(String field) throws IOException { } @Override - public TopDocs searchNearestVectors(String field, byte[] target, int k, Bits acceptDocs, int visitedLimit) throws IOException { + public void searchNearestVectors(String field, byte[] target, KnnCollector k, Bits acceptDocs) throws IOException { throw new UnsupportedOperationException(); } @Override - public TopDocs searchNearestVectors(String field, float[] target, int k, Bits acceptDocs, int visitedLimit) throws IOException { + public void searchNearestVectors(String field, float[] target, KnnCollector k, Bits acceptDocs) throws IOException { throw new UnsupportedOperationException(); } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java index f9db28a2c56fe..81d4ce2dd8772 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java @@ -94,6 +94,13 @@ public interface IndexFieldData { */ SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse); + /** + * Returns the {@link SortField} to use for index sorting where we widen the sort field type to higher or equal bytes. + */ + default SortField wideSortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { + return sortField(missingValue, sortMode, nested, reverse); + } + /** * Build a sort implementation specialized for aggregations. 
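wideSortField is introduced as a default method so every existing IndexFieldData implementation keeps compiling with its old sortField behaviour, while the IndexNumericFieldData hunk that follows overrides it to widen INT sorts to LONG for pre-2.7 segment compatibility. A toy sketch of that default-plus-override shape; all type names below are hypothetical:

// Default-method pattern: the interface supplies a fallback so existing
// implementations are untouched, and only numeric field data overrides it.
enum SortSpec { INT, LONG, DOUBLE }

interface FieldData {
    SortSpec sortField();

    // new API point: defaults to the narrow sort
    default SortSpec wideSortField() {
        return sortField();
    }
}

final class IntFieldData implements FieldData {
    @Override
    public SortSpec sortField() {
        return SortSpec.INT;
    }

    @Override
    public SortSpec wideSortField() {
        // widen INT to LONG so index-sort metadata written before 2.7
        // (which always upcast to Long/Double) stays readable
        return SortSpec.LONG;
    }
}

public final class WideSortDemo {
    public static void main(String[] args) {
        FieldData fd = new IntFieldData();
        System.out.println(fd.sortField() + " -> " + fd.wideSortField()); // INT -> LONG
    }
}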
*/ diff --git a/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java index ae8ffd8fe6b97..6fc074fe0de95 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/IndexNumericFieldData.java @@ -151,6 +151,25 @@ public final SortField sortField(Object missingValue, MultiValueMode sortMode, N return sortField(getNumericType(), missingValue, sortMode, nested, reverse); } + @Override + public final SortField wideSortField(Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { + // This is to support backward compatibility, the minimum number of bytes prior to OpenSearch 2.7 were 16 bytes, + // i.e all sort fields were upcasted to Long/Double with 16 bytes. + // Now from OpenSearch 2.7, the minimum number of bytes for sort field is 8 bytes, so if it comes as SortField INT, + // we need to up cast it to LONG to support backward compatibility info stored in segment info + if (getNumericType().sortFieldType == SortField.Type.INT) { + XFieldComparatorSource source = comparatorSource(NumericType.LONG, missingValue, sortMode, nested); + SortedNumericSelector.Type selectorType = sortMode == MultiValueMode.MAX + ? SortedNumericSelector.Type.MAX + : SortedNumericSelector.Type.MIN; + SortField sortField = new SortedNumericSortField(getFieldName(), SortField.Type.LONG, reverse, selectorType); + sortField.setMissingValue(source.missingObject(missingValue, reverse)); + return sortField; + } + // If already more than INT, up cast not needed. + return sortField(getNumericType(), missingValue, sortMode, nested, reverse); + } + /** * Builds a {@linkplain BucketedSort} for the {@code targetNumericType}, * casting the values if their native type doesn't match. @@ -223,8 +242,8 @@ private XFieldComparatorSource comparatorSource( assert !targetNumericType.isFloatingPoint(); source = new IntValuesComparatorSource(this, missingValue, sortMode, nested); } - if (targetNumericType != getNumericType()) { - source.disableSkipping(); // disable skipping logic for caste of sort field + if (targetNumericType != getNumericType() || getNumericType() == NumericType.HALF_FLOAT) { + source.disableSkipping(); // disable skipping logic for cast of sort field } return source; } diff --git a/server/src/main/java/org/opensearch/index/fielddata/MultiGeoPointValues.java b/server/src/main/java/org/opensearch/index/fielddata/MultiGeoPointValues.java index 2df4baeb8631b..3090b8e7f5b15 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/MultiGeoPointValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/MultiGeoPointValues.java @@ -73,7 +73,7 @@ protected MultiGeoPointValues() {} /** * Return the next value associated with the current document. This must not be * called more than {@link #docValueCount()} times. - * + *
<p>
              * Note: the returned {@link GeoPoint} might be shared across invocations. * * @return the next value for the current docID set to {@link #advanceExact(int)}. diff --git a/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java b/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java index 1d1524e223f00..29fd5ae1a216c 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java +++ b/server/src/main/java/org/opensearch/index/fielddata/ScriptDocValues.java @@ -55,7 +55,7 @@ /** * Script level doc values, the assumption is that any implementation will * implement a {@link Longs#getValue getValue} method. - * + *
<p>
              * Implementations should not internally re-use objects for the values that they * return as a single {@link ScriptDocValues} instance can be reused to return * values form multiple documents. @@ -589,11 +589,11 @@ public BytesRef get(int index) { + "Use doc[].size()==0 to check if a document is missing a field!" ); } - /** - * We need to make a copy here because {@link BinaryScriptDocValues} might reuse the - * returned value and the same instance might be used to - * return values from multiple documents. - **/ + /* + We need to make a copy here because {@link BinaryScriptDocValues} might reuse the + returned value and the same instance might be used to + return values from multiple documents. + */ return values[index].toBytesRef(); } diff --git a/server/src/main/java/org/opensearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java index 4f27c9b10f0ee..fe033fa7a3f70 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/plain/SortedSetOrdinalsIndexFieldData.java @@ -101,9 +101,9 @@ public SortedSetOrdinalsIndexFieldData( @Override public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); - /** - * Check if we can use a simple {@link SortedSetSortField} compatible with index sorting and - * returns a custom sort field otherwise. + /* + Check if we can use a simple {@link SortedSetSortField} compatible with index sorting and + returns a custom sort field otherwise. */ if (nested != null || (sortMode != MultiValueMode.MAX && sortMode != MultiValueMode.MIN) diff --git a/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java index a038cf178bb03..3b6782b34feea 100644 --- a/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/AbstractGeometryFieldMapper.java @@ -121,7 +121,7 @@ public abstract static class Parser { /** * Given a parsed value and a format string, formats the value into a plain Java object. - * + *
<p>
              * Supported formats include 'geojson' and 'wkt'. The different formats are defined * as subclasses of {@link org.opensearch.common.geo.GeometryFormat}. */ @@ -129,7 +129,7 @@ public abstract static class Parser { /** * Parses the given value, then formats it according to the 'format' string. - * + *
<p>
              * By default, this method simply parses the value using {@link Parser#parse}, then formats * it with {@link Parser#format}. However some {@link Parser} implementations override this * as they can avoid parsing the value if it is already in the right format. diff --git a/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java index f5dc34ab8ac5d..b3112df86bab6 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/ArraySourceValueFetcher.java @@ -43,7 +43,7 @@ /** * An implementation of {@link ValueFetcher} that knows how to extract values * from the document source. - * + *
<p>
              * This class differs from {@link SourceValueFetcher} in that it directly handles * array values in parsing. Field types should use this class if their corresponding * mapper returns true for {@link FieldMapper#parsesArrayValue()}. diff --git a/server/src/main/java/org/opensearch/index/mapper/BinaryFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/BinaryFieldMapper.java index 62a4af247e0fa..040491f775357 100644 --- a/server/src/main/java/org/opensearch/index/mapper/BinaryFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/BinaryFieldMapper.java @@ -242,6 +242,10 @@ protected String contentType() { */ public static class CustomBinaryDocValuesField extends CustomDocValuesField { + // We considered using a TreeSet instead of an ArrayList here. + // Benchmarks show that ArrayList performs much better + // For details, see: https://github.com/opensearch-project/OpenSearch/pull/9426 + // Benchmarks are in CustomBinaryDocValuesFiledBenchmark private final ArrayList bytesList; public CustomBinaryDocValuesField(String name, byte[] bytes) { diff --git a/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java index 90ef3cc1689a4..e1413fd9b4bbe 100644 --- a/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java @@ -447,13 +447,13 @@ int getMaxInputLength() { /** * Parses and indexes inputs - * + *
<p>
              * Parsing: * Acceptable format: * "STRING" - interpreted as field value (input) * "ARRAY" - each element can be one of "OBJECT" (see below) * "OBJECT" - { "input": STRING|ARRAY, "weight": STRING|INT, "contexts": ARRAY|OBJECT } - * + *
<p>
              * Indexing: * if context mappings are defined, delegates to {@link ContextMappings#addField(ParseContext.Document, String, String, int, Map)} * else adds inputs as a {@link org.apache.lucene.search.suggest.document.SuggestField} diff --git a/server/src/main/java/org/opensearch/index/mapper/CustomDocValuesField.java b/server/src/main/java/org/opensearch/index/mapper/CustomDocValuesField.java index 2776e7515bbf6..fbb67731f581b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/CustomDocValuesField.java +++ b/server/src/main/java/org/opensearch/index/mapper/CustomDocValuesField.java @@ -42,8 +42,7 @@ import java.io.Reader; /** - * Base class for constructing a custom docvalues type - * + * Base class for constructing a custom docvalues type. * used for binary, geo, and range fields * * @opensearch.api diff --git a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java index d9b26cce3735c..4c374f02bba52 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java @@ -53,6 +53,7 @@ import org.opensearch.common.time.DateMathParser; import org.opensearch.common.time.DateUtils; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.LocaleUtils; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData.NumericType; @@ -92,7 +93,21 @@ public final class DateFieldMapper extends ParametrizedFieldMapper { public static final String CONTENT_TYPE = "date"; public static final String DATE_NANOS_CONTENT_TYPE = "date_nanos"; - public static final DateFormatter DEFAULT_DATE_TIME_FORMATTER = DateFormatter.forPattern("strict_date_optional_time||epoch_millis"); + @Deprecated + public static final DateFormatter LEGACY_DEFAULT_DATE_TIME_FORMATTER = DateFormatter.forPattern( + // TODO remove in 3.0 after backporting + "strict_date_optional_time||epoch_millis" + ); + public static final DateFormatter DEFAULT_DATE_TIME_FORMATTER = DateFormatter.forPattern( + "strict_date_time_no_millis||strict_date_optional_time||epoch_millis", + "strict_date_optional_time" + ); + + public static DateFormatter getDefaultDateTimeFormatter() { + return FeatureFlags.isEnabled(FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING) + ? 
DEFAULT_DATE_TIME_FORMATTER + : LEGACY_DEFAULT_DATE_TIME_FORMATTER; + } /** * Resolution of the date time @@ -224,8 +239,14 @@ public static class Builder extends ParametrizedFieldMapper.Builder { "format", false, m -> toType(m).format, - DEFAULT_DATE_TIME_FORMATTER.pattern() + getDefaultDateTimeFormatter().pattern() ); + private final Parameter printFormat = Parameter.stringParam( + "print_format", + false, + m -> toType(m).printFormat, + getDefaultDateTimeFormatter().printPattern() + ).acceptsNull(); private final Parameter locale = new Parameter<>( "locale", false, @@ -254,6 +275,7 @@ public Builder( this.ignoreMalformed = Parameter.boolParam("ignore_malformed", true, m -> toType(m).ignoreMalformed, ignoreMalformedByDefault); if (dateFormatter != null) { this.format.setValue(dateFormatter.pattern()); + this.printFormat.setValue(dateFormatter.printPattern()); this.locale.setValue(dateFormatter.locale()); } } @@ -262,9 +284,13 @@ private DateFormatter buildFormatter() { try { if (Joda.isJodaPattern(indexCreatedVersion, format.getValue())) { return Joda.forPattern(format.getValue()).withLocale(locale.getValue()); - } else { - return DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); } + if (format.isConfigured() && !printFormat.isConfigured()) { + return DateFormatter.forPattern(format.getValue(), null, !format.isConfigured()).withLocale(locale.getValue()); + } + return DateFormatter.forPattern(format.getValue(), printFormat.getValue(), !format.isConfigured()) + .withLocale(locale.getValue()); + } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Error parsing [format] on field [" + name() + "]: " + e.getMessage(), e); } @@ -272,7 +298,7 @@ private DateFormatter buildFormatter() { @Override protected List> getParameters() { - return Arrays.asList(index, docValues, store, format, locale, nullValue, ignoreMalformed, boost, meta); + return Arrays.asList(index, docValues, store, format, printFormat, locale, nullValue, ignoreMalformed, boost, meta); } private Long parseNullValue(DateFieldType fieldType) { @@ -351,7 +377,7 @@ public DateFieldType( } public DateFieldType(String name) { - this(name, true, false, true, DEFAULT_DATE_TIME_FORMATTER, Resolution.MILLISECONDS, null, Collections.emptyMap()); + this(name, true, false, true, getDefaultDateTimeFormatter(), Resolution.MILLISECONDS, null, Collections.emptyMap()); } public DateFieldType(String name, DateFormatter dateFormatter) { @@ -359,7 +385,7 @@ public DateFieldType(String name, DateFormatter dateFormatter) { } public DateFieldType(String name, Resolution resolution) { - this(name, true, false, true, DEFAULT_DATE_TIME_FORMATTER, resolution, null, Collections.emptyMap()); + this(name, true, false, true, getDefaultDateTimeFormatter(), resolution, null, Collections.emptyMap()); } public DateFieldType(String name, Resolution resolution, DateFormatter dateFormatter) { @@ -615,6 +641,7 @@ public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { private final boolean hasDocValues; private final Locale locale; private final String format; + private final String printFormat; private final boolean ignoreMalformed; private final Long nullValue; private final String nullValueAsString; @@ -638,6 +665,7 @@ private DateFieldMapper( this.hasDocValues = builder.docValues.getValue(); this.locale = builder.locale.getValue(); this.format = builder.format.getValue(); + this.printFormat = builder.printFormat.getValue(); this.ignoreMalformed = builder.ignoreMalformed.getValue(); 
this.nullValueAsString = builder.nullValue.getValue(); this.nullValue = nullValue; diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java index 8b0af72ba0852..22280dd55b9eb 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java @@ -212,7 +212,7 @@ private Tuple> extractMapping(String type, String so /** * Given an optional type name and mapping definition, returns the type and a normalized form of the mappings. - * + *
<p>
              * The provided mapping definition may or may not contain the type name as the root key in the map. This method * attempts to unwrap the mappings, so that they no longer contain a type name at the root. If no type name can * be found, through either the 'type' parameter or by examining the provided mappings, then an exception will be @@ -220,7 +220,6 @@ private Tuple> extractMapping(String type, String so * * @param type An optional type name. * @param root The mapping definition. - * * @return A tuple of the form (type, normalized mappings). */ @SuppressWarnings({ "unchecked" }) diff --git a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java index 94bc4806ba0e0..2e59d86f9119c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldMapper.java @@ -43,7 +43,7 @@ * to {@link DynamicKeyFieldMapper#keyedFieldType(String)}, with 'some_key' passed as the * argument. The field mapper is allowed to create a new field type dynamically in order * to handle the search. - * + *
<p>
              * To prevent conflicts between these dynamic sub-keys and multi-fields, any field mappers * implementing this interface should explicitly disallow multi-fields. The constructor makes * sure to passes an empty multi-fields list to help prevent conflicting sub-keys from being diff --git a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldTypeLookup.java b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldTypeLookup.java index 13150ddc50a51..a415078108eb6 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldTypeLookup.java +++ b/server/src/main/java/org/opensearch/index/mapper/DynamicKeyFieldTypeLookup.java @@ -37,11 +37,11 @@ /** * A container that supports looking up field types for 'dynamic key' fields ({@link DynamicKeyFieldMapper}). - * + *
<p>
              * Compared to standard fields, 'dynamic key' fields require special handling. Given a field name of the form * 'path_to_field.path_to_key', the container will dynamically return a new {@link MappedFieldType} that is * suitable for performing searches on the sub-key. - * + *
<p>
              * Note: we anticipate that 'flattened' fields will be the only implementation {@link DynamicKeyFieldMapper}. * Flattened object fields live in the 'mapper-flattened' module. * diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldAliasMapper.java b/server/src/main/java/org/opensearch/index/mapper/FieldAliasMapper.java index 86c92ec19a2f7..ff9cb61b85571 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FieldAliasMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FieldAliasMapper.java @@ -43,7 +43,7 @@ /** * A mapper for field aliases. - * + *
<p>
              * A field alias has no concrete field mappings of its own, but instead points to another field by * its path. Once defined, an alias can be used in place of the concrete field name in search requests. * diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java index f8514c86fa418..f6178a8284945 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FieldMapper.java @@ -305,7 +305,7 @@ public void parse(ParseContext context) throws IOException { /** * Parse the field value and populate the fields on {@link ParseContext#doc()}. - * + *
<p>
              * Implementations of this method should ensure that on failing to parse parser.currentToken() must be the * current failing token */ diff --git a/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java b/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java index 8e1b6f2a3c08b..549ab0403e5dc 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java +++ b/server/src/main/java/org/opensearch/index/mapper/FieldTypeLookup.java @@ -57,7 +57,7 @@ class FieldTypeLookup implements Iterable { * A map from field name to all fields whose content has been copied into it * through copy_to. A field only be present in the map if some other field * has listed it as a target of copy_to. - * + *
<p>
              * For convenience, the set of copied fields includes the field itself. */ private final Map> fieldToCopiedFields = new HashMap<>(); @@ -133,7 +133,7 @@ public Set simpleMatchToFullName(String pattern) { /** * Given a concrete field name, return its paths in the _source. - * + *
<p>
              * For most fields, the source path is the same as the field itself. However * there are cases where a field's values are found elsewhere in the _source: * - For a multi-field, the source path is the parent field. diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java index db35c3edcc4a8..00b623dddac23 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -575,9 +575,9 @@ protected void parseCreateField(ParseContext context) throws IOException { context, fieldType().name() ); - /** - * JsonToStringParser is the main parser class to transform JSON into stringFields in a XContentParser - * It reads the JSON object and parsed to a list of string + /* + JsonToStringParser is the main parser class to transform JSON into stringFields in a XContentParser + It reads the JSON object and parsed to a list of string */ XContentParser parser = JsonToStringParser.parseObject(); diff --git a/server/src/main/java/org/opensearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/GeoPointFieldMapper.java index e3dab3f892949..fcca7e9804bf3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/GeoPointFieldMapper.java @@ -63,7 +63,7 @@ /** * Field Mapper for geo_point types. - * + *
<p>
              * Uses lucene 6 LatLonPoint encoding * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index 92ee8067ee4a0..c14b2c92c89c3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -38,11 +38,24 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.RegexpQuery; +import org.apache.lucene.search.TermInSetQuery; +import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.Operations; +import org.opensearch.OpenSearchException; import org.opensearch.common.Nullable; +import org.opensearch.common.lucene.BytesRefs; import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.search.AutomatonQueries; +import org.opensearch.common.unit.Fuzziness; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.analysis.NamedAnalyzer; @@ -62,6 +75,8 @@ import java.util.Objects; import java.util.function.Supplier; +import static org.opensearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; + /** * A field mapper for keywords. This mapper accepts strings and indexes them as-is. 
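The KeywordFieldMapper hunks that follow repeatedly build the same query twice, once against the inverted index and once with MultiTermQuery.DOC_VALUES_REWRITE, then combine the two with IndexOrDocValuesQuery so Lucene can pick the cheaper side per segment (postings when the clause leads iteration, doc values when it only verifies candidates). A minimal sketch of that combinator on a plain term query; the field name and value are made up, and lucene-core is assumed on the classpath:

import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;

// Give Lucene both execution strategies and let it choose per segment.
public final class IndexOrDvDemo {
    public static Query statusEquals(String value) {
        Query onIndex = new TermQuery(new Term("status", value));      // uses postings
        Query onDocValues = SortedSetDocValuesField.newSlowExactQuery( // uses doc values
            "status", new BytesRef(value));
        return new IndexOrDocValuesQuery(onIndex, onDocValues);
    }

    public static void main(String[] args) {
        System.out.println(statusEquals("published"));
    }
}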
* @@ -317,7 +332,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S @Override public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { if (format != null) { - throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't " + "support formats."); } return new SourceValueFetcher(name(), context, nullValue) { @@ -372,17 +387,226 @@ protected BytesRef indexedValueForSearch(Object value) { return getTextSearchInfo().getSearchAnalyzer().normalize(name(), value.toString()); } + @Override + public Query termsQuery(List values, QueryShardContext context) { + failIfNotIndexedAndNoDocValues(); + // has index and doc_values enabled + if (isSearchable() && hasDocValues()) { + BytesRef[] bytesRefs = new BytesRef[values.size()]; + for (int i = 0; i < bytesRefs.length; i++) { + bytesRefs[i] = indexedValueForSearch(values.get(i)); + } + Query indexQuery = new TermInSetQuery(name(), bytesRefs); + Query dvQuery = new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, name(), bytesRefs); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + // if we only have doc_values enabled, we construct a new query with doc_values re-written + if (hasDocValues()) { + BytesRef[] bytesRefs = new BytesRef[values.size()]; + for (int i = 0; i < bytesRefs.length; i++) { + bytesRefs[i] = indexedValueForSearch(values.get(i)); + } + return new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, name(), bytesRefs); + } + // has index enabled, we're going to return the query as is + return super.termsQuery(values, context); + } + + @Override + public Query prefixQuery( + String value, + @Nullable MultiTermQuery.RewriteMethod method, + boolean caseInsensitive, + QueryShardContext context + ) { + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[prefix] queries cannot be executed when '" + + ALLOW_EXPENSIVE_QUERIES.getKey() + + "' is set to false. For optimised prefix queries on text " + + "fields please enable [index_prefixes]." + ); + } + failIfNotIndexedAndNoDocValues(); + if (isSearchable() && hasDocValues()) { + Query indexQuery = super.prefixQuery(value, method, caseInsensitive, context); + Query dvQuery = super.prefixQuery(value, MultiTermQuery.DOC_VALUES_REWRITE, caseInsensitive, context); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + if (hasDocValues()) { + if (caseInsensitive) { + return AutomatonQueries.caseInsensitivePrefixQuery( + (new Term(name(), indexedValueForSearch(value))), + MultiTermQuery.DOC_VALUES_REWRITE + ); + } + return new PrefixQuery(new Term(name(), indexedValueForSearch(value)), MultiTermQuery.DOC_VALUES_REWRITE); + } + return super.prefixQuery(value, method, caseInsensitive, context); + } + + @Override + public Query regexpQuery( + String value, + int syntaxFlags, + int matchFlags, + int maxDeterminizedStates, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[regexp] queries cannot be executed when '" + ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to " + "false." 
+ ); + } + failIfNotIndexedAndNoDocValues(); + if (isSearchable() && hasDocValues()) { + Query indexQuery = super.regexpQuery(value, syntaxFlags, matchFlags, maxDeterminizedStates, method, context); + Query dvQuery = super.regexpQuery( + value, + syntaxFlags, + matchFlags, + maxDeterminizedStates, + MultiTermQuery.DOC_VALUES_REWRITE, + context + ); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + if (hasDocValues()) { + return new RegexpQuery( + new Term(name(), indexedValueForSearch(value)), + syntaxFlags, + matchFlags, + RegexpQuery.DEFAULT_PROVIDER, + maxDeterminizedStates, + MultiTermQuery.DOC_VALUES_REWRITE + ); + } + return super.regexpQuery(value, syntaxFlags, matchFlags, maxDeterminizedStates, method, context); + } + + @Override + public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[range] queries on [text] or [keyword] fields cannot be executed when '" + + ALLOW_EXPENSIVE_QUERIES.getKey() + + "' is set to false." + ); + } + failIfNotIndexedAndNoDocValues(); + if (isSearchable() && hasDocValues()) { + Query indexQuery = new TermRangeQuery( + name(), + lowerTerm == null ? null : indexedValueForSearch(lowerTerm), + upperTerm == null ? null : indexedValueForSearch(upperTerm), + includeLower, + includeUpper + ); + Query dvQuery = new TermRangeQuery( + name(), + lowerTerm == null ? null : indexedValueForSearch(lowerTerm), + upperTerm == null ? null : indexedValueForSearch(upperTerm), + includeLower, + includeUpper, + MultiTermQuery.DOC_VALUES_REWRITE + ); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + if (hasDocValues()) { + return new TermRangeQuery( + name(), + lowerTerm == null ? null : indexedValueForSearch(lowerTerm), + upperTerm == null ? null : indexedValueForSearch(upperTerm), + includeLower, + includeUpper, + MultiTermQuery.DOC_VALUES_REWRITE + ); + } + return new TermRangeQuery( + name(), + lowerTerm == null ? null : indexedValueForSearch(lowerTerm), + upperTerm == null ? null : indexedValueForSearch(upperTerm), + includeLower, + includeUpper + ); + } + + @Override + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean transpositions, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + failIfNotIndexedAndNoDocValues(); + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[fuzzy] queries cannot be executed when '" + ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to " + "false." 
+ ); + } + if (isSearchable() && hasDocValues()) { + Query indexQuery = super.fuzzyQuery(value, fuzziness, prefixLength, maxExpansions, transpositions, context); + Query dvQuery = super.fuzzyQuery( + value, + fuzziness, + prefixLength, + maxExpansions, + transpositions, + MultiTermQuery.DOC_VALUES_REWRITE, + context + ); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + if (hasDocValues()) { + return new FuzzyQuery( + new Term(name(), indexedValueForSearch(value)), + fuzziness.asDistance(BytesRefs.toString(value)), + prefixLength, + maxExpansions, + transpositions, + MultiTermQuery.DOC_VALUES_REWRITE + ); + } + return super.fuzzyQuery(value, fuzziness, prefixLength, maxExpansions, transpositions, context); + } + @Override public Query wildcardQuery( String value, @Nullable MultiTermQuery.RewriteMethod method, - boolean caseInsensitve, + boolean caseInsensitive, QueryShardContext context ) { - // keyword field types are always normalized, so ignore case sensitivity and force normalize the wildcard + if (context.allowExpensiveQueries() == false) { + throw new OpenSearchException( + "[wildcard] queries cannot be executed when '" + ALLOW_EXPENSIVE_QUERIES.getKey() + "' is set to " + "false." + ); + } + failIfNotIndexedAndNoDocValues(); + // keyword field types are always normalized, so ignore case sensitivity and force normalize the + // wildcard // query text - return super.wildcardQuery(value, method, caseInsensitve, true, context); + if (isSearchable() && hasDocValues()) { + Query indexQuery = super.wildcardQuery(value, method, caseInsensitive, true, context); + Query dvQuery = super.wildcardQuery(value, MultiTermQuery.DOC_VALUES_REWRITE, caseInsensitive, true, context); + return new IndexOrDocValuesQuery(indexQuery, dvQuery); + } + if (hasDocValues()) { + Term term; + value = normalizeWildcardPattern(name(), value, getTextSearchInfo().getSearchAnalyzer()); + term = new Term(name(), value); + if (caseInsensitive) { + return AutomatonQueries.caseInsensitiveWildcardQuery(term, method); + } + return new WildcardQuery(term, Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, MultiTermQuery.DOC_VALUES_REWRITE); + } + return super.wildcardQuery(value, method, caseInsensitive, true, context); } + } private final boolean indexed; @@ -422,8 +646,10 @@ protected KeywordFieldMapper( this.indexAnalyzers = builder.indexAnalyzers; } - /** Values that have more chars than the return value of this method will - * be skipped at parsing time. */ + /** + * Values that have more chars than the return value of this method will + * be skipped at parsing time. + */ public int ignoreAbove() { return ignoreAbove; } diff --git a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java index 58aa0bb2576e2..62acad99074c2 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java @@ -125,7 +125,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S /** * Create a helper class to fetch field values during the {@link FetchFieldsPhase}. - * + *
<p>
              * New field types must implement this method in order to support the search 'fields' option. Except * for metadata fields, field types should not throw {@link UnsupportedOperationException} since this * could cause a search retrieving multiple fields (like "fields": ["*"]) to fail. @@ -269,6 +269,21 @@ public Query fuzzyQuery( ); } + // Fuzzy Query with re-write method + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean transpositions, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + throw new IllegalArgumentException( + "Can only use fuzzy queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]" + ); + } + // Case sensitive form of prefix query public final Query prefixQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) { return prefixQuery(value, method, false, context); @@ -433,6 +448,15 @@ protected final void failIfNotIndexed() { } } + protected final void failIfNotIndexedAndNoDocValues() { + // we fail if a field is both not indexed and does not have doc_values enabled + if (isIndexed == false && hasDocValues() == false) { + throw new IllegalArgumentException( + "Cannot search on field [" + name() + "] since it is both not indexed," + " and does not have doc_values enabled." + ); + } + } + public boolean eagerGlobalOrdinals() { return eagerGlobalOrdinals; } @@ -487,7 +511,7 @@ public Map meta() { /** * Returns information on how any text in this field is indexed - * + *
<p>
              * Fields that do not support any text-based queries should return * {@link TextSearchInfo#NONE}. Some fields (eg numeric) may support * only simple match queries, and can return diff --git a/server/src/main/java/org/opensearch/index/mapper/Mapper.java b/server/src/main/java/org/opensearch/index/mapper/Mapper.java index 59c647d38f0de..9c0e3ef72549a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/Mapper.java @@ -190,7 +190,7 @@ public Supplier queryShardContextSupplier() { /** * Gets an optional default date format for date fields that do not have an explicit format set - * + *
<p>
              * If {@code null}, then date fields will default to {@link DateFieldMapper#DEFAULT_DATE_TIME_FORMATTER}. */ public DateFormatter getDateFormatter() { diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index aad37ffec6683..25809fd3e57e8 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -173,9 +173,6 @@ public enum MergeReason { ); private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(MapperService.class); - static final String DEFAULT_MAPPING_ERROR_MESSAGE = "[_default_] mappings are not allowed on new indices and should no " - + "longer be used. See [https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking-changes-7.0.html" - + "#default-mapping-not-allowed] for more information."; private final IndexAnalyzers indexAnalyzers; diff --git a/server/src/main/java/org/opensearch/index/mapper/MappingLookup.java b/server/src/main/java/org/opensearch/index/mapper/MappingLookup.java index 024f4b71584bf..1c608fc52c1f5 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/opensearch/index/mapper/MappingLookup.java @@ -157,7 +157,7 @@ public MappingLookup( /** * Returns the leaf mapper associated with this field name. Note that the returned mapper * could be either a concrete {@link FieldMapper}, or a {@link FieldAliasMapper}. - * + *
<p>
              * To access a field's type information, {@link MapperService#fieldType} should be used instead. */ public Mapper getMapper(String field) { diff --git a/server/src/main/java/org/opensearch/index/mapper/MetadataFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/MetadataFieldMapper.java index 4f5aefdfeed55..5a45ab72994ee 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MetadataFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/MetadataFieldMapper.java @@ -70,7 +70,7 @@ public interface TypeParser extends Mapper.TypeParser { /** * Declares an updateable boolean parameter for a metadata field - * + *
<p>
              * We need to distinguish between explicit configuration and default value for metadata * fields, because mapping updates will carry over the previous metadata values if a * metadata field is not explicitly declared in the update. A standard boolean diff --git a/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java index fd57975831e88..93b929a82f095 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/ParametrizedFieldMapper.java @@ -63,11 +63,11 @@ /** * Defines how a particular field should be indexed and searched - * + *
<p>
              * Configuration {@link Parameter}s for the mapper are defined on a {@link Builder} subclass, * and returned by its {@link Builder#getParameters()} method. Merging, serialization * and parsing of the mapper are all mediated through this set of parameters. - * + *
<p>
              * Subclasses should implement a {@link Builder} that is returned from the * {@link #getMergeBuilder()} method, initialised with the existing builder. * @@ -86,7 +86,7 @@ protected ParametrizedFieldMapper(String simpleName, MappedFieldType mappedField /** * Returns a {@link Builder} to be used for merging and serialization - * + *
<p>
              * Implement as follows: * {@code return new MyBuilder(simpleName()).init(this); } */ @@ -256,7 +256,7 @@ public Parameter acceptsNull() { /** * Adds a deprecated parameter name. - * + *
<p>
              * If this parameter name is encountered during parsing, a deprecation warning will * be emitted. The parameter will be serialized with its main name. */ diff --git a/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java index 3723e469d3049..539a4d048246c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java @@ -96,7 +96,7 @@ public class RangeFieldMapper extends ParametrizedFieldMapper { */ public static class Defaults { public static final Explicit COERCE = new Explicit<>(true, false); - public static final DateFormatter DATE_FORMATTER = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER; + public static final DateFormatter DATE_FORMATTER = DateFieldMapper.getDefaultDateTimeFormatter(); } // this is private since it has a different default diff --git a/server/src/main/java/org/opensearch/index/mapper/RangeType.java b/server/src/main/java/org/opensearch/index/mapper/RangeType.java index c8cd317779c7c..7e29fd417845b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/RangeType.java +++ b/server/src/main/java/org/opensearch/index/mapper/RangeType.java @@ -313,7 +313,7 @@ public Query rangeQuery( ) { ZoneId zone = (timeZone == null) ? ZoneOffset.UTC : timeZone; - DateMathParser dateMathParser = (parser == null) ? DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser() : parser; + DateMathParser dateMathParser = (parser == null) ? DateFieldMapper.getDefaultDateTimeFormatter().toDateMathParser() : parser; boolean roundUp = includeLower == false; // using "gt" should round lower bound up Long low = lowerTerm == null ? minValue() diff --git a/server/src/main/java/org/opensearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/opensearch/index/mapper/RootObjectMapper.java index f9a7d7c72565e..4f4a5b55dd029 100644 --- a/server/src/main/java/org/opensearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/RootObjectMapper.java @@ -73,7 +73,7 @@ public class RootObjectMapper extends ObjectMapper { */ public static class Defaults { public static final DateFormatter[] DYNAMIC_DATE_TIME_FORMATTERS = new DateFormatter[] { - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis") }; public static final boolean DATE_DETECTION = true; public static final boolean NUMERIC_DETECTION = false; diff --git a/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java index 16f76f087e403..038066773a360 100644 --- a/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/SeqNoFieldMapper.java @@ -55,11 +55,11 @@ /** * Mapper for the {@code _seq_no} field. - * + *
<p>
              * We expect to use the seq# for sorting, during collision checking and for * doing range searches. Therefore the {@code _seq_no} field is stored both * as a numeric doc value and as numeric indexed field. - * + *
<p>
              * This mapper also manages the primary term field, which has no OpenSearch named * equivalent. The primary term is only used during collision after receiving * identical seq# values for two document copies. The primary term is stored as diff --git a/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java index 69f53ba126790..a32d1c9f489ca 100644 --- a/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/SourceValueFetcher.java @@ -46,7 +46,7 @@ * An implementation of {@link ValueFetcher} that knows how to extract values * from the document source. Most standard field mappers will use this class * to implement value fetching. - * + *
<p>
              * Field types that handle arrays directly should instead use {@link ArraySourceValueFetcher}. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/mapper/ValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/ValueFetcher.java index d1cea3fe7f1b0..96237b16ea5a4 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/ValueFetcher.java @@ -48,11 +48,11 @@ public interface ValueFetcher { /** * Given access to a document's _source, return this field's values. - * + *
<p>
              * In addition to pulling out the values, they will be parsed into a standard form. * For example numeric field mappers make sure to parse the source value into a number * of the right type. - * + *
<p>
              * Note that for array values, the order in which values are returned is undefined and * should not be relied on. * diff --git a/server/src/main/java/org/opensearch/index/merge/MergeStats.java b/server/src/main/java/org/opensearch/index/merge/MergeStats.java index 37fdca8871b18..a284cec247ff1 100644 --- a/server/src/main/java/org/opensearch/index/merge/MergeStats.java +++ b/server/src/main/java/org/opensearch/index/merge/MergeStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.merge; +import org.opensearch.Version; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -65,9 +66,9 @@ public class MergeStats implements Writeable, ToXContentFragment { private long totalBytesPerSecAutoThrottle; - public MergeStats() { + private long unreferencedFileCleanUpsPerformed; - } + public MergeStats() {} public MergeStats(StreamInput in) throws IOException { total = in.readVLong(); @@ -81,6 +82,9 @@ public MergeStats(StreamInput in) throws IOException { totalStoppedTimeInMillis = in.readVLong(); totalThrottledTimeInMillis = in.readVLong(); totalBytesPerSecAutoThrottle = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_2_11_0)) { + unreferencedFileCleanUpsPerformed = in.readOptionalVLong(); + } } public void add( @@ -133,6 +137,7 @@ public void addTotals(MergeStats mergeStats) { this.totalSizeInBytes += mergeStats.totalSizeInBytes; this.totalStoppedTimeInMillis += mergeStats.totalStoppedTimeInMillis; this.totalThrottledTimeInMillis += mergeStats.totalThrottledTimeInMillis; + addUnreferencedFileCleanUpStats(mergeStats.unreferencedFileCleanUpsPerformed); if (this.totalBytesPerSecAutoThrottle == Long.MAX_VALUE || mergeStats.totalBytesPerSecAutoThrottle == Long.MAX_VALUE) { this.totalBytesPerSecAutoThrottle = Long.MAX_VALUE; } else { @@ -140,6 +145,14 @@ public void addTotals(MergeStats mergeStats) { } } + public void addUnreferencedFileCleanUpStats(long unreferencedFileCleanUpsPerformed) { + this.unreferencedFileCleanUpsPerformed += unreferencedFileCleanUpsPerformed; + } + + public long getUnreferencedFileCleanUpsPerformed() { + return this.unreferencedFileCleanUpsPerformed; + } + /** * The total number of merges executed. 
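In the addTotals hunk above, totalBytesPerSecAutoThrottle saturates at Long.MAX_VALUE, which stands for "unthrottled", so summing per-shard merge stats can never wrap an unthrottled shard back into a small, throttled-looking number. A standalone sketch of that saturating addition; note that the negative-sum overflow guard is an extra safety added in this sketch, not something the hunk itself does:

// Saturating accumulation: MAX_VALUE is a sentinel ("unthrottled") and must
// be absorbing under addition when per-shard stats are rolled up.
public final class SaturatingAdd {
    static long addSaturating(long a, long b) {
        if (a == Long.MAX_VALUE || b == Long.MAX_VALUE) {
            return Long.MAX_VALUE; // unthrottled stays unthrottled
        }
        long sum = a + b;
        // plain addition can still overflow for large finite inputs
        return sum < 0 ? Long.MAX_VALUE : sum;
    }

    public static void main(String[] args) {
        System.out.println(addSaturating(5_000L, Long.MAX_VALUE)); // 9223372036854775807
        System.out.println(addSaturating(5_000L, 7_000L));         // 12000
    }
}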
*/ @@ -240,6 +253,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.TOTAL_THROTTLE_BYTES_PER_SEC).value(new ByteSizeValue(totalBytesPerSecAutoThrottle).toString()); } builder.field(Fields.TOTAL_THROTTLE_BYTES_PER_SEC_IN_BYTES, totalBytesPerSecAutoThrottle); + builder.field(Fields.UNREFERENCED_FILE_CLEANUPS_PERFORMED, unreferencedFileCleanUpsPerformed); builder.endObject(); return builder; } @@ -267,6 +281,7 @@ static final class Fields { static final String TOTAL_SIZE_IN_BYTES = "total_size_in_bytes"; static final String TOTAL_THROTTLE_BYTES_PER_SEC_IN_BYTES = "total_auto_throttle_in_bytes"; static final String TOTAL_THROTTLE_BYTES_PER_SEC = "total_auto_throttle"; + static final String UNREFERENCED_FILE_CLEANUPS_PERFORMED = "unreferenced_file_cleanups_performed"; } @Override @@ -282,5 +297,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(totalStoppedTimeInMillis); out.writeVLong(totalThrottledTimeInMillis); out.writeVLong(totalBytesPerSecAutoThrottle); + if (out.getVersion().onOrAfter(Version.V_2_11_0)) { + out.writeOptionalVLong(unreferencedFileCleanUpsPerformed); + } } } diff --git a/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java index 6b8dd08ed0d91..66c6ee115c3f0 100644 --- a/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/AbstractQueryBuilder.java @@ -298,7 +298,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws I /** * For internal usage only! - * + *
<p>
              * Extracts the inner hits from the query tree. * While it extracts inner hits, child inner hits are inlined into the inner hit builder they belong to. */ diff --git a/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java index 65c2dfa9c5a8b..c44a7ef6a397c 100644 --- a/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/BoolQueryBuilder.java @@ -427,4 +427,35 @@ private static boolean rewriteClauses( } return changed; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (mustClauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(Occur.MUST); + for (QueryBuilder mustClause : mustClauses) { + mustClause.visit(subVisitor); + } + } + if (shouldClauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(Occur.SHOULD); + for (QueryBuilder shouldClause : shouldClauses) { + shouldClause.visit(subVisitor); + } + } + if (mustNotClauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(Occur.MUST_NOT); + for (QueryBuilder mustNotClause : mustNotClauses) { + mustNotClause.visit(subVisitor); + } + } + if (filterClauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(Occur.FILTER); + for (QueryBuilder filterClause : filterClauses) { + filterClause.visit(subVisitor); + } + } + + } + } diff --git a/server/src/main/java/org/opensearch/index/query/BoostingQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/BoostingQueryBuilder.java index 26124b422f26f..1b52ae2f03605 100644 --- a/server/src/main/java/org/opensearch/index/query/BoostingQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/BoostingQueryBuilder.java @@ -33,6 +33,7 @@ package org.opensearch.index.query; import org.apache.lucene.queries.function.FunctionScoreQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -252,4 +253,15 @@ protected void extractInnerHitBuilders(Map inner InnerHitContextBuilder.extractInnerHits(positiveQuery, innerHits); InnerHitContextBuilder.extractInnerHits(negativeQuery, innerHits); } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (positiveQuery != null) { + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(positiveQuery); + } + if (negativeQuery != null) { + visitor.getChildVisitor(BooleanClause.Occur.SHOULD).accept(negativeQuery); + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java index 6a29ad8a0a401..b2764d29da80a 100644 --- a/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/ConstantScoreQueryBuilder.java @@ -32,6 +32,7 @@ package org.opensearch.index.query; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; @@ -183,4 +184,11 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws protected void extractInnerHitBuilders(Map innerHits) { InnerHitContextBuilder.extractInnerHits(filterBuilder, 
innerHits); } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + visitor.getChildVisitor(BooleanClause.Occur.FILTER).accept(filterBuilder); + } + } diff --git a/server/src/main/java/org/opensearch/index/query/DisMaxQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/DisMaxQueryBuilder.java index 91f4a02fac6c0..bd8ec62f6c43e 100644 --- a/server/src/main/java/org/opensearch/index/query/DisMaxQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/DisMaxQueryBuilder.java @@ -32,6 +32,7 @@ package org.opensearch.index.query; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.Query; import org.opensearch.common.lucene.search.Queries; @@ -246,4 +247,15 @@ protected void extractInnerHitBuilders(Map inner InnerHitContextBuilder.extractInnerHits(query, innerHits); } } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (queries.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(BooleanClause.Occur.SHOULD); + for (QueryBuilder subQb : queries) { + subVisitor.accept(subQb); + } + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java index 09a71795a3f27..1162689a54689 100644 --- a/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.FieldMaskingSpanQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -207,4 +208,10 @@ protected boolean doEquals(FieldMaskingSpanQueryBuilder other) { public String getWriteableName() { return SPAN_FIELD_MASKING_FIELD.getPreferredName(); } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(queryBuilder); + } } diff --git a/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java index a4b75beab26ea..1fade8601e2a6 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -60,7 +60,7 @@ /** * Creates a Lucene query that will filter for all documents that lie within the specified * bounding box. - * + *
<p>
              * This query can only operate on fields of type geo_point that have latitude and longitude * enabled. * diff --git a/server/src/main/java/org/opensearch/index/query/GeoShapeQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/GeoShapeQueryBuilder.java index e029884f32531..33b896a1d5163 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoShapeQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/GeoShapeQueryBuilder.java @@ -55,7 +55,7 @@ /** * Derived {@link AbstractGeometryQueryBuilder} that builds a lat, lon GeoShape Query. It * can be applied to any {@link MappedFieldType} that implements {@link GeoShapeQueryable}. - * + *
<p>
              * GeoJson and WKT shape definitions are supported * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/query/GeoValidationMethod.java b/server/src/main/java/org/opensearch/index/query/GeoValidationMethod.java index 559c084325abb..bb3cd34ae629d 100644 --- a/server/src/main/java/org/opensearch/index/query/GeoValidationMethod.java +++ b/server/src/main/java/org/opensearch/index/query/GeoValidationMethod.java @@ -42,7 +42,7 @@ /** * This enum is used to determine how to deal with invalid geo coordinates in geo related * queries: - * + *
<p>
* On STRICT validation invalid coordinates cause an exception to be thrown. * On IGNORE_MALFORMED invalid coordinates are accepted. * On COERCE invalid coordinates are corrected to the most likely valid coordinate. diff --git a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java index 791020c248630..ed9d898543fd5 100644 --- a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java +++ b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java @@ -719,7 +719,7 @@ public static class Regexp extends IntervalsSourceProvider { /** * Constructor - * + *
<p>
              * {@code flags} is Lucene's syntax flags * and {@code caseInsensitive} enables Lucene's only matching flag. */ diff --git a/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java index 9a8d2ab104799..7ceb17203e837 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java @@ -176,7 +176,7 @@ public String minimumShouldMatch() { } @Deprecated - /** Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ + /* Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ public MatchBoolPrefixQueryBuilder fuzziness(Object fuzziness) { this.fuzziness = Fuzziness.build(fuzziness); return this; diff --git a/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java index 844d47070923b..5e9e6a3660e76 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java @@ -209,7 +209,7 @@ public String analyzer() { } @Deprecated - /** Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ + /* Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ public MatchQueryBuilder fuzziness(Object fuzziness) { this.fuzziness = Fuzziness.build(fuzziness); return this; diff --git a/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java index 831cdb6f24147..5b3b0e96994f8 100644 --- a/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java @@ -87,7 +87,7 @@ /** * A more like this query that finds documents that are "like" the provided set of document(s). - * + *
<p>
* The documents are provided as a set of strings and/or a list of {@link Item}. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java index 2f04d025a6abe..dd79a56494fa4 100644 --- a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java @@ -405,8 +405,8 @@ public int slop() { } @Deprecated - /** - * Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". + /* + Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ public MultiMatchQueryBuilder fuzziness(Object fuzziness) { if (fuzziness != null) { diff --git a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java index 00a7587afcbfd..176dec5249156 100644 --- a/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/NestedQueryBuilder.java @@ -311,10 +311,13 @@ protected Query doToQuery(QueryShardContext context) throws IOException { parentFilter = context.bitsetFilter(objectMapper.nestedTypeFilter()); } + BitSetProducer previousParentFilter = context.getParentFilter(); try { + context.setParentFilter(parentFilter); context.nestedScope().nextLevel(nestedObjectMapper); innerQuery = this.query.toQuery(context); } finally { + context.setParentFilter(previousParentFilter); context.nestedScope().previousLevel(); } diff --git a/server/src/main/java/org/opensearch/index/query/QueryBuilder.java b/server/src/main/java/org/opensearch/index/query/QueryBuilder.java index a40ccf427794a..090f74c5be7fe 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/QueryBuilder.java @@ -95,4 +95,13 @@ public interface QueryBuilder extends NamedWriteable, ToXContentObject, Rewritea default QueryBuilder rewrite(QueryRewriteContext queryShardContext) throws IOException { return this; } + + /** + * Recurse through the QueryBuilder tree, visiting any child QueryBuilder. + * @param visitor a query builder visitor to be called by each query builder in the tree. + */ + default void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + }; + } diff --git a/server/src/main/java/org/opensearch/index/query/QueryBuilderVisitor.java b/server/src/main/java/org/opensearch/index/query/QueryBuilderVisitor.java new file mode 100644 index 0000000000000..af5a125f9dd95 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/QueryBuilderVisitor.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +import org.apache.lucene.search.BooleanClause; + +/** + * QueryBuilderVisitor is an interface to define Visitor Object to be traversed in QueryBuilder tree. + */ +public interface QueryBuilderVisitor { + + /** + * Accept method is called when the visitor accepts the queryBuilder object to be traversed in the query tree. + * @param qb is a queryBuilder object which is accepted by the visitor. + */ + void accept(QueryBuilder qb); + + /** + * Fetches the child sub visitor from the main QueryBuilderVisitor Object.
+ * @param occur defines the occurrence of the result fetched from the search query in the final search result. + * @return a child queryBuilder Visitor Object. + */ + QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur); + + /** + * NoopQueryVisitor is a default implementation of QueryBuilderVisitor. + * When a user does not want to implement QueryBuilderVisitor and just needs to pass an empty object, this class can be used. + * + */ + QueryBuilderVisitor NO_OP_VISITOR = new QueryBuilderVisitor() { + @Override + public void accept(QueryBuilder qb) { + // Do nothing + } + + @Override + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + return this; + } + }; + +} diff --git a/server/src/main/java/org/opensearch/index/query/QueryShapeVisitor.java b/server/src/main/java/org/opensearch/index/query/QueryShapeVisitor.java new file mode 100644 index 0000000000000..3ba13bc7a2da4 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/QueryShapeVisitor.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +import org.apache.lucene.search.BooleanClause; +import org.opensearch.common.SetOnce; + +import java.util.ArrayList; +import java.util.EnumMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +/** + * Class to traverse the QueryBuilder tree and capture the query shape + */ +public final class QueryShapeVisitor implements QueryBuilderVisitor { + private final SetOnce<String> queryType = new SetOnce<>(); + private final Map<BooleanClause.Occur, List<QueryShapeVisitor>> childVisitors = new EnumMap<>(BooleanClause.Occur.class); + + @Override + public void accept(QueryBuilder qb) { + queryType.set(qb.getName()); + } + + @Override + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + // Should get called once per Occur value + if (childVisitors.containsKey(occur)) { + throw new IllegalStateException("child visitor already called for " + occur); + } + final List<QueryShapeVisitor> childVisitorList = new ArrayList<>(); + QueryBuilderVisitor childVisitorWrapper = new QueryBuilderVisitor() { + QueryShapeVisitor currentChild; + + @Override + public void accept(QueryBuilder qb) { + currentChild = new QueryShapeVisitor(); + childVisitorList.add(currentChild); + currentChild.accept(qb); + } + + @Override + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + return currentChild.getChildVisitor(occur); + } + }; + childVisitors.put(occur, childVisitorList); + return childVisitorWrapper; + } + + String toJson() { + StringBuilder outputBuilder = new StringBuilder("{\"type\":\"").append(queryType.get()).append("\""); + for (Map.Entry<BooleanClause.Occur, List<QueryShapeVisitor>> entry : childVisitors.entrySet()) { + outputBuilder.append(",\"").append(entry.getKey().name().toLowerCase(Locale.ROOT)).append("\"["); + boolean first = true; + for (QueryShapeVisitor child : entry.getValue()) { + if (!first) { + outputBuilder.append(","); + } + outputBuilder.append(child.toJson()); + first = false; + } + outputBuilder.append("]"); + } + outputBuilder.append("}"); + return outputBuilder.toString(); + } + + public String prettyPrintTree(String indent) { + StringBuilder outputBuilder = new StringBuilder(indent).append(queryType.get()).append("\n"); + for (Map.Entry<BooleanClause.Occur, List<QueryShapeVisitor>> entry : childVisitors.entrySet()) { + outputBuilder.append(indent).append(" 
").append(entry.getKey().name().toLowerCase(Locale.ROOT)).append(":\n"); + for (QueryShapeVisitor child : entry.getValue()) { + outputBuilder.append(child.prettyPrintTree(indent + " ")); + } + } + return outputBuilder.toString(); + } +} diff --git a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java index 0bf05e633bba3..7b248c2a6f3c3 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java @@ -115,6 +115,7 @@ public class QueryShardContext extends QueryRewriteContext { private boolean mapUnmappedFieldAsString; private NestedScope nestedScope; private final ValuesSourceRegistry valuesSourceRegistry; + private BitSetProducer parentFilter; public QueryShardContext( int shardId, @@ -509,7 +510,7 @@ public final void freezeContext() { /** * This method fails if {@link #freezeContext()} is called before on this * context. This is used to seal. - * + *
<p>
              * This methods and all methods that call it should be final to ensure that * setting the request as not cacheable and the freezing behaviour of this * class cannot be bypassed. This is important so we can trust when this @@ -622,4 +623,12 @@ public BitsetFilterCache getBitsetFilterCache() { public AggregationUsageService getUsageService() { return valuesSourceRegistry.getUsageService(); } + + public BitSetProducer getParentFilter() { + return parentFilter; + } + + public void setParentFilter(BitSetProducer parentFilter) { + this.parentFilter = parentFilter; + } } diff --git a/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java index 469dc51da323a..3d8fbd5fc436d 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java @@ -119,7 +119,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder * Can be changed back to HashMap once https://issues.apache.org/jira/browse/LUCENE-6305 is fixed. */ private final Map fieldsAndWeights = new TreeMap<>(); diff --git a/server/src/main/java/org/opensearch/index/query/RangeQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/RangeQueryBuilder.java index 0d54373112904..fdbef2c732361 100644 --- a/server/src/main/java/org/opensearch/index/query/RangeQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/RangeQueryBuilder.java @@ -504,9 +504,9 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws @Override protected Query doToQuery(QueryShardContext context) throws IOException { if (from == null && to == null) { - /** - * Open bounds on both side, we can rewrite to an exists query - * if the {@link FieldNamesFieldMapper} is enabled. + /* + Open bounds on both side, we can rewrite to an exists query + if the {@link FieldNamesFieldMapper} is enabled. */ final FieldNamesFieldMapper.FieldNamesFieldType fieldNamesFieldType = (FieldNamesFieldMapper.FieldNamesFieldType) context .getMapperService() diff --git a/server/src/main/java/org/opensearch/index/query/SearchIndexNameMatcher.java b/server/src/main/java/org/opensearch/index/query/SearchIndexNameMatcher.java index 00758309fc0f0..598406b4e45f2 100644 --- a/server/src/main/java/org/opensearch/index/query/SearchIndexNameMatcher.java +++ b/server/src/main/java/org/opensearch/index/query/SearchIndexNameMatcher.java @@ -73,7 +73,7 @@ public SearchIndexNameMatcher( /** * Given an index pattern, checks whether it matches against the current shard. - * + *
<p>
* If this shard represents a remote shard target, then in order to match, the pattern must contain * the separator ':', and must match on both the cluster alias and index name. */ diff --git a/server/src/main/java/org/opensearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/opensearch/index/query/SimpleQueryStringBuilder.java index 3cd0554af49a4..57ae7dd0ea5e9 100644 --- a/server/src/main/java/org/opensearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SimpleQueryStringBuilder.java @@ -66,7 +66,7 @@ *
<ul>
 * <li>'{@code -}' negates a single token: {@code -token0}
 * <li>'{@code "}' creates phrases of terms: {@code "term1 term2 ..."}
 * <li>'{@code *}' at the end of terms specifies prefix query: {@code term*}
- * <li>'{@code (}' and '{@code)}' specifies precedence: {@code token1 + (token2 | token3)}
+ * <li>'{@code (}' and '{@code )}' specifies precedence: {@code token1 + (token2 | token3)}
 * <li>'{@code ~}N' at the end of terms specifies fuzzy query: {@code term~1}
 * <li>'{@code ~}N' at the end of phrases specifies near/slop query: {@code "term1 term2"~5}
 * </ul>
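The visit() overrides added to the query builders in this change all follow the same pattern: the visitor first accepts the current builder, then each child clause is routed through a sub-visitor obtained from getChildVisitor() with the matching BooleanClause.Occur. A minimal sketch of how calling code might traverse a query tree with this API (the QueryNameCollector class and the field names are hypothetical; visit, accept, getName and getChildVisitor are the methods introduced or exercised by this change):

    import org.apache.lucene.search.BooleanClause;
    import org.opensearch.index.query.QueryBuilder;
    import org.opensearch.index.query.QueryBuilderVisitor;
    import org.opensearch.index.query.QueryBuilders;

    import java.util.ArrayList;
    import java.util.List;

    public class QueryNameCollector {
        public static List<String> collectNames(QueryBuilder root) {
            List<String> names = new ArrayList<>();
            QueryBuilderVisitor collector = new QueryBuilderVisitor() {
                @Override
                public void accept(QueryBuilder qb) {
                    names.add(qb.getName()); // record every builder in the tree
                }

                @Override
                public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) {
                    return this; // reuse the same collector for every clause type
                }
            };
            root.visit(collector);
            return names;
        }

        public static void main(String[] args) {
            QueryBuilder query = QueryBuilders.boolQuery()
                .must(QueryBuilders.termQuery("user", "kimchy"))
                .filter(QueryBuilders.rangeQuery("age").gte(21));
            System.out.println(collectNames(query)); // [bool, term, range]
        }
    }

Returning this from getChildVisitor(), as the NO_OP_VISITOR default above also does, applies a single visitor at every depth; QueryShapeVisitor instead spawns a fresh child visitor per clause so it can reconstruct the shape of the tree.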
            diff --git a/server/src/main/java/org/opensearch/index/query/SpanContainingQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanContainingQueryBuilder.java index ed4f5c6848b06..32a19ea3e9b50 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanContainingQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanContainingQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanContainingQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -188,4 +189,11 @@ protected boolean doEquals(SpanContainingQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(big); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(little); + } } diff --git a/server/src/main/java/org/opensearch/index/query/SpanFirstQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanFirstQueryBuilder.java index 7427b13463284..bcbc64ddf386d 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanFirstQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanFirstQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanFirstQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -186,4 +187,10 @@ protected boolean doEquals(SpanFirstQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(matchBuilder); + } } diff --git a/server/src/main/java/org/opensearch/index/query/SpanMultiTermQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanMultiTermQueryBuilder.java index ce28391ca478b..96d03c91964e3 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanMultiTermQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanMultiTermQueryBuilder.java @@ -33,6 +33,7 @@ import org.apache.lucene.queries.SpanMatchNoDocsQuery; import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MatchNoDocsQuery; @@ -213,4 +214,12 @@ protected boolean doEquals(SpanMultiTermQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (multiTermQueryBuilder != null) { + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(multiTermQueryBuilder); + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java index ba7625d94a5a6..30a1c29c29126 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanNearQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanNearQuery; import 
org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -299,6 +300,17 @@ public String getWriteableName() { return NAME; } + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (this.clauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(BooleanClause.Occur.MUST); + for (QueryBuilder subQb : this.clauses) { + subVisitor.accept(subQb); + } + } + } + /** * SpanGapQueryBuilder enables gaps in a SpanNearQuery. * Since, SpanGapQuery is private to SpanNearQuery, SpanGapQueryBuilder cannot diff --git a/server/src/main/java/org/opensearch/index/query/SpanNotQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanNotQueryBuilder.java index 98e7f287749f5..59ec5b9d77fc8 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanNotQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanNotQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanNotQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -284,4 +285,16 @@ protected boolean doEquals(SpanNotQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (include != null) { + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(include); + } + + if (exclude != null) { + visitor.getChildVisitor(BooleanClause.Occur.MUST_NOT).accept(exclude); + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/SpanOrQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanOrQueryBuilder.java index 2f63e6d7403f7..fae1e318c66bd 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanOrQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanOrQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanOrQuery; import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import org.opensearch.core.common.ParsingException; @@ -188,4 +189,15 @@ protected boolean doEquals(SpanOrQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (clauses.isEmpty() == false) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(BooleanClause.Occur.SHOULD); + for (QueryBuilder subQb : this.clauses) { + subVisitor.accept(subQb); + } + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/SpanWithinQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/SpanWithinQueryBuilder.java index 5d02cc0026dfd..4d5a6dde61a70 100644 --- a/server/src/main/java/org/opensearch/index/query/SpanWithinQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/SpanWithinQueryBuilder.java @@ -34,6 +34,7 @@ import org.apache.lucene.queries.spans.SpanQuery; import org.apache.lucene.queries.spans.SpanWithinQuery; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.core.ParseField; import 
org.opensearch.core.common.ParsingException; @@ -197,4 +198,11 @@ protected boolean doEquals(SpanWithinQueryBuilder other) { public String getWriteableName() { return NAME; } + + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(big); + visitor.getChildVisitor(BooleanClause.Occur.MUST).accept(little); + } } diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java index a7e2360325c5a..9d28d81f732df 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java @@ -32,6 +32,7 @@ package org.opensearch.index.query.functionscore; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; @@ -46,6 +47,7 @@ import org.opensearch.index.query.InnerHitContextBuilder; import org.opensearch.index.query.MatchNoneQueryBuilder; import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.query.QueryShardContext; import org.opensearch.script.ScoreScript; @@ -233,4 +235,12 @@ protected void extractInnerHitBuilders(Map inner InnerHitContextBuilder.extractInnerHits(query(), innerHits); } + @Override + public void visit(QueryBuilderVisitor visitor) { + visitor.accept(this); + if (query != null) { + QueryBuilderVisitor subVisitor = visitor.getChildVisitor(BooleanClause.Occur.MUST); + subVisitor.accept(query); + } + } } diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunctionFactory.java b/server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunctionFactory.java index 4edcd34889abd..9db58f0f78a30 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunctionFactory.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/TermFrequencyFunctionFactory.java @@ -11,7 +11,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.function.FunctionValues; import org.apache.lucene.queries.function.valuesource.SumTotalTermFreqValueSource; -import org.apache.lucene.queries.function.valuesource.TFValueSource; import org.apache.lucene.queries.function.valuesource.TermFreqValueSource; import org.apache.lucene.queries.function.valuesource.TotalTermFreqValueSource; import org.apache.lucene.search.IndexSearcher; @@ -42,15 +41,6 @@ public static TermFrequencyFunction createFunction( TermFreqValueSource termFreqValueSource = new TermFreqValueSource(field, term, field, BytesRefs.toBytesRef(term)); FunctionValues functionValues = termFreqValueSource.getValues(null, readerContext); return docId -> functionValues.intVal(docId); - case TF: - TFValueSource tfValueSource = new TFValueSource(field, term, field, BytesRefs.toBytesRef(term)); - Map tfContext = new HashMap<>() { - { - put("searcher", indexSearcher); - } - }; - functionValues = tfValueSource.getValues(tfContext, readerContext); - return docId -> functionValues.floatVal(docId); case TOTAL_TERM_FREQ: TotalTermFreqValueSource totalTermFreqValueSource = new TotalTermFreqValueSource( field, @@ -78,7 +68,6 @@ public static TermFrequencyFunction 
createFunction( */ public enum TermFrequencyFunctionName { TERM_FREQ("termFreq"), - TF("tf"), TOTAL_TERM_FREQ("totalTermFreq"), SUM_TOTAL_TERM_FREQ("sumTotalTermFreq"); diff --git a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java index 2617898fef491..23bb4cea17a20 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java +++ b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java @@ -15,17 +15,25 @@ import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexTemplateMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.MetadataCreateIndexService; +import org.opensearch.cluster.metadata.MetadataIndexUpgradeService; +import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; -import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Nullable; import org.opensearch.common.UUIDs; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.indices.ShardLimitValidator; import org.opensearch.repositories.IndexId; import org.opensearch.snapshots.RestoreInfo; import org.opensearch.snapshots.RestoreService; @@ -33,11 +41,14 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.function.Function; import java.util.stream.Collectors; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; /** * Service responsible for restoring index data from remote store @@ -51,85 +62,47 @@ public class RemoteStoreRestoreService { private final AllocationService allocationService; - public RemoteStoreRestoreService(ClusterService clusterService, AllocationService allocationService) { + private final MetadataCreateIndexService createIndexService; + + private final MetadataIndexUpgradeService metadataIndexUpgradeService; + + private final ShardLimitValidator shardLimitValidator; + + private final RemoteClusterStateService remoteClusterStateService; + + public RemoteStoreRestoreService( + ClusterService clusterService, + AllocationService allocationService, + MetadataCreateIndexService createIndexService, + MetadataIndexUpgradeService metadataIndexUpgradeService, + ShardLimitValidator shardLimitValidator, + RemoteClusterStateService remoteClusterStateService + ) { this.clusterService = clusterService; this.allocationService = allocationService; + this.createIndexService = createIndexService; + this.metadataIndexUpgradeService = metadataIndexUpgradeService; + this.shardLimitValidator = shardLimitValidator; + this.remoteClusterStateService 
= remoteClusterStateService; } + /** + * Restores data from remote store for indices specified in the restore request. + * + * @param request restore request + * @param listener restore listener + */ public void restore(RestoreRemoteStoreRequest request, final ActionListener listener) { clusterService.submitStateUpdateTask("restore[remote_store]", new ClusterStateUpdateTask() { - final String restoreUUID = UUIDs.randomBase64UUID(); + String restoreUUID; RestoreInfo restoreInfo = null; @Override public ClusterState execute(ClusterState currentState) { - // Updating cluster state - ClusterState.Builder builder = ClusterState.builder(currentState); - Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata()); - ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); - RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable()); - - List indicesToBeRestored = new ArrayList<>(); - int totalShards = 0; - for (String index : request.indices()) { - IndexMetadata currentIndexMetadata = currentState.metadata().index(index); - if (currentIndexMetadata == null) { - // ToDo: Handle index metadata does not exist case. (GitHub #3457) - logger.warn("Remote store restore is not supported for non-existent index. Skipping: {}", index); - continue; - } - if (currentIndexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false)) { - IndexMetadata updatedIndexMetadata = currentIndexMetadata; - Map activeInitializingShards = new HashMap<>(); - if (request.restoreAllShards()) { - if (currentIndexMetadata.getState() != IndexMetadata.State.CLOSE) { - throw new IllegalStateException( - "cannot restore index [" - + index - + "] because an open index " - + "with same name already exists in the cluster. 
Close the existing index" - ); - } - updatedIndexMetadata = IndexMetadata.builder(currentIndexMetadata) - .state(IndexMetadata.State.OPEN) - .version(1 + currentIndexMetadata.getVersion()) - .mappingVersion(1 + currentIndexMetadata.getMappingVersion()) - .settingsVersion(1 + currentIndexMetadata.getSettingsVersion()) - .aliasesVersion(1 + currentIndexMetadata.getAliasesVersion()) - .build(); - } else { - activeInitializingShards = currentState.routingTable() - .index(index) - .shards() - .values() - .stream() - .map(IndexShardRoutingTable::primaryShard) - .filter(shardRouting -> shardRouting.unassigned() == false) - .collect(Collectors.toMap(ShardRouting::shardId, Function.identity())); - } - - IndexId indexId = new IndexId(index, updatedIndexMetadata.getIndexUUID()); - - RecoverySource.RemoteStoreRecoverySource recoverySource = new RecoverySource.RemoteStoreRecoverySource( - restoreUUID, - updatedIndexMetadata.getCreationVersion(), - indexId - ); - rtBuilder.addAsRemoteStoreRestore(updatedIndexMetadata, recoverySource, activeInitializingShards); - blocks.updateBlocks(updatedIndexMetadata); - mdBuilder.put(updatedIndexMetadata, true); - indicesToBeRestored.add(index); - totalShards += updatedIndexMetadata.getNumberOfShards(); - } else { - logger.warn("Remote store is not enabled for index: {}", index); - } - } - - restoreInfo = new RestoreInfo("remote_store", indicesToBeRestored, totalShards, totalShards); - - RoutingTable rt = rtBuilder.build(); - ClusterState updatedState = builder.metadata(mdBuilder).blocks(blocks).routingTable(rt).build(); - return allocationService.reroute(updatedState, "restored from remote store"); + RemoteRestoreResult remoteRestoreResult = restore(currentState, null, request.restoreAllShards(), request.indices()); + restoreUUID = remoteRestoreResult.getRestoreUUID(); + restoreInfo = remoteRestoreResult.getRestoreInfo(); + return remoteRestoreResult.getClusterState(); } @Override @@ -140,7 +113,7 @@ public void onFailure(String source, Exception e) { @Override public TimeValue timeout() { - return request.masterNodeTimeout(); + return request.clusterManagerNodeTimeout(); } @Override @@ -148,6 +121,201 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS listener.onResponse(new RestoreService.RestoreCompletionResponse(restoreUUID, null, restoreInfo)); } }); + } + + /** + * Executes remote restore + * @param currentState current cluster state + * @param restoreClusterUUID cluster UUID used to restore IndexMetadata + * @param restoreAllShards indicates if all shards of the index needs to be restored. This flag is ignored if remoteClusterUUID is provided + * @param indexNames list of indices to restore. 
This list is ignored if remoteClusterUUID is provided + * @return remote restore result + */ + public RemoteRestoreResult restore( + ClusterState currentState, + @Nullable String restoreClusterUUID, + boolean restoreAllShards, + String[] indexNames + ) { + Map> indexMetadataMap = new HashMap<>(); + ClusterState remoteState = null; + boolean metadataFromRemoteStore = (restoreClusterUUID == null + || restoreClusterUUID.isEmpty() + || restoreClusterUUID.isBlank()) == false; + if (metadataFromRemoteStore) { + try { + // Restore with current cluster UUID will fail as same indices would be present in the cluster which we are trying to + // restore + if (currentState.metadata().clusterUUID().equals(restoreClusterUUID)) { + throw new IllegalArgumentException("clusterUUID to restore from should be different from current cluster UUID"); + } + logger.info("Restoring cluster state from remote store from cluster UUID : [{}]", restoreClusterUUID); + remoteState = remoteClusterStateService.getLatestClusterState(currentState.getClusterName().value(), restoreClusterUUID); + remoteState.getMetadata().getIndices().values().forEach(indexMetadata -> { + indexMetadataMap.put(indexMetadata.getIndex().getName(), new Tuple<>(true, indexMetadata)); + }); + } catch (Exception e) { + throw new IllegalStateException("Unable to restore remote index metadata", e); + } + } else { + for (String indexName : indexNames) { + IndexMetadata indexMetadata = currentState.metadata().index(indexName); + if (indexMetadata == null) { + logger.warn("Index restore is not supported for non-existent index. Skipping: {}", indexName); + } else if (indexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false) == false) { + logger.warn("Remote store is not enabled for index: {}", indexName); + } else if (restoreAllShards && IndexMetadata.State.CLOSE.equals(indexMetadata.getState()) == false) { + throw new IllegalStateException( + String.format( + Locale.ROOT, + "cannot restore index [%s] because an open index with same name/uuid already exists in the cluster.", + indexName + ) + " Close the existing index." 
+ ); + } else { + indexMetadataMap.put(indexName, new Tuple<>(false, indexMetadata)); + } + } + } + return executeRestore(currentState, indexMetadataMap, restoreAllShards, remoteState); + } + + /** + * Executes remote restore + * @param currentState current cluster state + * @param indexMetadataMap map of index metadata to restore + * @param restoreAllShards indicates if all shards of the index needs to be restored + * @return remote restore result + */ + private RemoteRestoreResult executeRestore( + ClusterState currentState, + Map> indexMetadataMap, + boolean restoreAllShards, + ClusterState remoteState + ) { + final String restoreUUID = UUIDs.randomBase64UUID(); + List indicesToBeRestored = new ArrayList<>(); + int totalShards = 0; + boolean metadataFromRemoteStore = false; + ClusterState.Builder builder = ClusterState.builder(currentState); + Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata()); + ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); + RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable()); + for (Map.Entry> indexMetadataEntry : indexMetadataMap.entrySet()) { + String indexName = indexMetadataEntry.getKey(); + IndexMetadata indexMetadata = indexMetadataEntry.getValue().v2(); + metadataFromRemoteStore = indexMetadataEntry.getValue().v1(); + IndexMetadata updatedIndexMetadata = indexMetadata; + if (metadataFromRemoteStore == false && restoreAllShards) { + updatedIndexMetadata = IndexMetadata.builder(indexMetadata) + .state(IndexMetadata.State.OPEN) + .version(1 + indexMetadata.getVersion()) + .mappingVersion(1 + indexMetadata.getMappingVersion()) + .settingsVersion(1 + indexMetadata.getSettingsVersion()) + .aliasesVersion(1 + indexMetadata.getAliasesVersion()) + .build(); + } + + IndexId indexId = new IndexId(indexName, updatedIndexMetadata.getIndexUUID()); + + if (metadataFromRemoteStore == false) { + Map indexShardRoutingTableMap = currentState.routingTable() + .index(indexName) + .shards() + .values() + .stream() + .collect(Collectors.toMap(IndexShardRoutingTable::shardId, Function.identity())); + + RecoverySource.RemoteStoreRecoverySource recoverySource = new RecoverySource.RemoteStoreRecoverySource( + restoreUUID, + updatedIndexMetadata.getCreationVersion(), + indexId + ); + + rtBuilder.addAsRemoteStoreRestore(updatedIndexMetadata, recoverySource, indexShardRoutingTableMap, restoreAllShards); + } + + blocks.updateBlocks(updatedIndexMetadata); + mdBuilder.put(updatedIndexMetadata, true); + indicesToBeRestored.add(indexName); + totalShards += updatedIndexMetadata.getNumberOfShards(); + } + + if (remoteState != null) { + restoreGlobalMetadata(mdBuilder, remoteState.getMetadata()); + // Restore ClusterState version + logger.info("Restoring ClusterState with Remote State version [{}]", remoteState.version()); + builder.version(remoteState.version()); + } + + RestoreInfo restoreInfo = new RestoreInfo("remote_store", indicesToBeRestored, totalShards, totalShards); + + RoutingTable rt = rtBuilder.build(); + ClusterState updatedState = builder.metadata(mdBuilder).blocks(blocks).routingTable(rt).build(); + if (metadataFromRemoteStore == false) { + updatedState = allocationService.reroute(updatedState, "restored from remote store"); + } + return RemoteRestoreResult.build(restoreUUID, restoreInfo, updatedState); + } + + private void restoreGlobalMetadata(Metadata.Builder mdBuilder, Metadata remoteMetadata) { + if (remoteMetadata.persistentSettings() != null) { + Settings settings = 
remoteMetadata.persistentSettings(); + clusterService.getClusterSettings().validateUpdate(settings); + mdBuilder.persistentSettings(settings); + } + if (remoteMetadata.templates() != null) { + for (final IndexTemplateMetadata cursor : remoteMetadata.templates().values()) { + mdBuilder.put(cursor); + } + } + if (remoteMetadata.customs() != null) { + for (final Map.Entry cursor : remoteMetadata.customs().entrySet()) { + if (RepositoriesMetadata.TYPE.equals(cursor.getKey()) == false) { + mdBuilder.putCustom(cursor.getKey(), cursor.getValue()); + } + } + } + Optional repositoriesMetadata = Optional.ofNullable(remoteMetadata.custom(RepositoriesMetadata.TYPE)); + repositoriesMetadata = repositoriesMetadata.map( + repositoriesMetadata1 -> new RepositoriesMetadata( + repositoriesMetadata1.repositories() + .stream() + .filter(repository -> SYSTEM_REPOSITORY_SETTING.get(repository.settings()) == false) + .collect(Collectors.toList()) + ) + ); + repositoriesMetadata.ifPresent(metadata -> mdBuilder.putCustom(RepositoriesMetadata.TYPE, metadata)); + } + + /** + * Result of a remote restore operation. + */ + public static class RemoteRestoreResult { + private final ClusterState clusterState; + private final RestoreInfo restoreInfo; + private final String restoreUUID; + + private RemoteRestoreResult(String restoreUUID, RestoreInfo restoreInfo, ClusterState clusterState) { + this.clusterState = clusterState; + this.restoreInfo = restoreInfo; + this.restoreUUID = restoreUUID; + } + + public static RemoteRestoreResult build(String restoreUUID, RestoreInfo restoreInfo, ClusterState clusterState) { + return new RemoteRestoreResult(restoreUUID, restoreInfo, clusterState); + } + + public ClusterState getClusterState() { + return clusterState; + } + + public RestoreInfo getRestoreInfo() { + return restoreInfo; + } + public String getRestoreUUID() { + return restoreUUID; + } } } diff --git a/server/src/main/java/org/opensearch/index/reindex/BulkByScrollTask.java b/server/src/main/java/org/opensearch/index/reindex/BulkByScrollTask.java index f33800659245f..d7c0da4773fff 100644 --- a/server/src/main/java/org/opensearch/index/reindex/BulkByScrollTask.java +++ b/server/src/main/java/org/opensearch/index/reindex/BulkByScrollTask.java @@ -73,13 +73,13 @@ /** * Task storing information about a currently running BulkByScroll request. - * + *
<p>
            * When the request is not sliced, this task is the only task created, and starts an action to perform search requests. - * + *
<p>
            * When the request is sliced, this task can either represent a coordinating task (using * {@link BulkByScrollTask#setWorkerCount(int)}) or a worker task that performs search queries (using * {@link BulkByScrollTask#setWorker(float, Integer)}). - * + *
<p>
            * We don't always know if this task will be a leader or worker task when it's created, because if slices is set to "auto" it may * be either depending on the number of shards in the source indices. We figure that out when the request is handled and set it on this * class with {@link #setWorkerCount(int)} or {@link #setWorker(float, Integer)}. diff --git a/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java b/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java index aff9ec1f20e46..4963080f5916c 100644 --- a/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java +++ b/server/src/main/java/org/opensearch/index/reindex/DeleteByQueryRequest.java @@ -49,7 +49,7 @@ /** * Creates a new {@link DeleteByQueryRequest} that uses scrolling and bulk requests to delete all documents matching * the query. This can have performance as well as visibility implications. - * + *
<p>
            * Delete-by-query now has the following semantics: *
<ul>
              *
            • it's {@code non-atomic}, a delete-by-query may fail at any time while some documents matching the query have already been diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java index f834f4ad9583d..4228ec60c4524 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentStats.java @@ -8,6 +8,7 @@ package org.opensearch.index.remote; +import org.opensearch.Version; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; @@ -19,6 +20,7 @@ import org.opensearch.index.shard.IndexShard; import java.io.IOException; +import java.util.Objects; /** * Tracks remote store segment download and upload stats @@ -61,6 +63,23 @@ public class RemoteSegmentStats implements Writeable, ToXContentFragment { * Used to check for data freshness in the remote store */ private long maxRefreshBytesLag; + /** + * Total refresh lag (in bytes) between local and the remote store + * Used to check for data freshness in the remote store + */ + private long totalRefreshBytesLag; + /** + * Total time spent in uploading segments to remote store + */ + private long totalUploadTime; + /** + * Total time spent in downloading segments from remote store + */ + private long totalDownloadTime; + /** + * Total rejections due to remote store upload backpressure + */ + private long totalRejections; public RemoteSegmentStats() {} @@ -73,6 +92,12 @@ public RemoteSegmentStats(StreamInput in) throws IOException { downloadBytesSucceeded = in.readLong(); maxRefreshTimeLag = in.readLong(); maxRefreshBytesLag = in.readLong(); + totalRefreshBytesLag = in.readLong(); + totalUploadTime = in.readLong(); + totalDownloadTime = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + totalRejections = in.readVLong(); + } } /** @@ -91,10 +116,18 @@ public RemoteSegmentStats(RemoteSegmentTransferTracker.Stats trackerStats) { this.downloadBytesStarted = trackerStats.directoryFileTransferTrackerStats.transferredBytesStarted; this.downloadBytesFailed = trackerStats.directoryFileTransferTrackerStats.transferredBytesFailed; this.maxRefreshTimeLag = trackerStats.refreshTimeLagMs; + // Initializing both total and max bytes lag to the same `bytesLag` + // value from the tracker object + // Aggregations would be performed on the add method this.maxRefreshBytesLag = trackerStats.bytesLag; + this.totalRefreshBytesLag = trackerStats.bytesLag; + this.totalUploadTime = trackerStats.totalUploadTimeInMs; + this.totalDownloadTime = trackerStats.directoryFileTransferTrackerStats.totalTransferTimeInMs; + this.totalRejections = trackerStats.rejectionCount; } // Getter and setters. 
All are visible for testing + // Setters are only used for testing public long getUploadBytesStarted() { return uploadBytesStarted; } @@ -155,8 +188,40 @@ public long getMaxRefreshBytesLag() { return maxRefreshBytesLag; } - public void setMaxRefreshBytesLag(long maxRefreshBytesLag) { - this.maxRefreshBytesLag = maxRefreshBytesLag; + public void addMaxRefreshBytesLag(long maxRefreshBytesLag) { + this.maxRefreshBytesLag = Math.max(this.maxRefreshBytesLag, maxRefreshBytesLag); + } + + public long getTotalRefreshBytesLag() { + return totalRefreshBytesLag; + } + + public void addTotalRefreshBytesLag(long totalRefreshBytesLag) { + this.totalRefreshBytesLag += totalRefreshBytesLag; + } + + public long getTotalUploadTime() { + return totalUploadTime; + } + + public void addTotalUploadTime(long totalUploadTime) { + this.totalUploadTime += totalUploadTime; + } + + public long getTotalDownloadTime() { + return totalDownloadTime; + } + + public void addTotalDownloadTime(long totalDownloadTime) { + this.totalDownloadTime += totalDownloadTime; + } + + public long getTotalRejections() { + return totalRejections; + } + + public void addTotalRejections(long totalRejections) { + this.totalRejections += totalRejections; } /** @@ -174,6 +239,10 @@ public void add(RemoteSegmentStats existingStats) { this.downloadBytesSucceeded += existingStats.getDownloadBytesSucceeded(); this.maxRefreshTimeLag = Math.max(this.maxRefreshTimeLag, existingStats.getMaxRefreshTimeLag()); this.maxRefreshBytesLag = Math.max(this.maxRefreshBytesLag, existingStats.getMaxRefreshBytesLag()); + this.totalRefreshBytesLag += existingStats.getTotalRefreshBytesLag(); + this.totalUploadTime += existingStats.getTotalUploadTime(); + this.totalDownloadTime += existingStats.getTotalDownloadTime(); + this.totalRejections += existingStats.totalRejections; } } @@ -187,50 +256,120 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(downloadBytesSucceeded); out.writeLong(maxRefreshTimeLag); out.writeLong(maxRefreshBytesLag); + out.writeLong(totalRefreshBytesLag); + out.writeLong(totalUploadTime); + out.writeLong(totalDownloadTime); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeVLong(totalRejections); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.REMOTE_STORE); + builder.startObject(Fields.UPLOAD); - builder.startObject(Fields.TOTAL_UPLOADS); + buildUploadStats(builder); + builder.endObject(); // UPLOAD + + builder.startObject(Fields.DOWNLOAD); + buildDownloadStats(builder); + builder.endObject(); // DOWNLOAD + + builder.endObject(); // REMOTE_STORE + + return builder; + } + + private void buildUploadStats(XContentBuilder builder) throws IOException { + builder.startObject(Fields.TOTAL_UPLOAD_SIZE); builder.humanReadableField(Fields.STARTED_BYTES, Fields.STARTED, new ByteSizeValue(uploadBytesStarted)); builder.humanReadableField(Fields.SUCCEEDED_BYTES, Fields.SUCCEEDED, new ByteSizeValue(uploadBytesSucceeded)); builder.humanReadableField(Fields.FAILED_BYTES, Fields.FAILED, new ByteSizeValue(uploadBytesFailed)); - builder.endObject(); + builder.endObject(); // TOTAL_UPLOAD_SIZE + + builder.startObject(Fields.REFRESH_SIZE_LAG); + builder.humanReadableField(Fields.TOTAL_BYTES, Fields.TOTAL, new ByteSizeValue(totalRefreshBytesLag)); + builder.humanReadableField(Fields.MAX_BYTES, Fields.MAX, new ByteSizeValue(maxRefreshBytesLag)); + builder.endObject(); // REFRESH_SIZE_LAG + 
builder.humanReadableField(Fields.MAX_REFRESH_TIME_LAG_IN_MILLIS, Fields.MAX_REFRESH_TIME_LAG, new TimeValue(maxRefreshTimeLag)); - builder.humanReadableField( - Fields.MAX_REFRESH_SIZE_LAG_IN_MILLIS, - Fields.MAX_REFRESH_SIZE_LAG, - new ByteSizeValue(maxRefreshBytesLag) - ); - builder.endObject(); - builder.startObject(Fields.DOWNLOAD); - builder.startObject(Fields.TOTAL_DOWNLOADS); + builder.humanReadableField(Fields.TOTAL_TIME_SPENT_IN_MILLIS, Fields.TOTAL_TIME_SPENT, new TimeValue(totalUploadTime)); + + builder.startObject(Fields.PRESSURE); + builder.field(Fields.TOTAL_REJECTIONS, totalRejections); + builder.endObject(); // PRESSURE + } + + private void buildDownloadStats(XContentBuilder builder) throws IOException { + builder.startObject(Fields.TOTAL_DOWNLOAD_SIZE); builder.humanReadableField(Fields.STARTED_BYTES, Fields.STARTED, new ByteSizeValue(downloadBytesStarted)); builder.humanReadableField(Fields.SUCCEEDED_BYTES, Fields.SUCCEEDED, new ByteSizeValue(downloadBytesSucceeded)); builder.humanReadableField(Fields.FAILED_BYTES, Fields.FAILED, new ByteSizeValue(downloadBytesFailed)); builder.endObject(); - builder.endObject(); - builder.endObject(); - return builder; + builder.humanReadableField(Fields.TOTAL_TIME_SPENT_IN_MILLIS, Fields.TOTAL_TIME_SPENT, new TimeValue(totalDownloadTime)); } static final class Fields { static final String REMOTE_STORE = "remote_store"; static final String UPLOAD = "upload"; static final String DOWNLOAD = "download"; - static final String TOTAL_UPLOADS = "total_uploads"; - static final String TOTAL_DOWNLOADS = "total_downloads"; + static final String TOTAL_UPLOAD_SIZE = "total_upload_size"; + static final String TOTAL_DOWNLOAD_SIZE = "total_download_size"; + static final String MAX_REFRESH_TIME_LAG = "max_refresh_time_lag"; + static final String MAX_REFRESH_TIME_LAG_IN_MILLIS = "max_refresh_time_lag_in_millis"; + static final String REFRESH_SIZE_LAG = "refresh_size_lag"; static final String STARTED = "started"; static final String STARTED_BYTES = "started_bytes"; static final String FAILED = "failed"; static final String FAILED_BYTES = "failed_bytes"; static final String SUCCEEDED = "succeeded"; static final String SUCCEEDED_BYTES = "succeeded_bytes"; - static final String MAX_REFRESH_TIME_LAG = "max_refresh_time_lag"; - static final String MAX_REFRESH_TIME_LAG_IN_MILLIS = "max_refresh_time_lag_in_millis"; - static final String MAX_REFRESH_SIZE_LAG = "max_refresh_size_lag"; - static final String MAX_REFRESH_SIZE_LAG_IN_MILLIS = "max_refresh_size_lag_in_bytes"; + static final String TOTAL = "total"; + static final String TOTAL_BYTES = "total_bytes"; + static final String MAX = "max"; + static final String MAX_BYTES = "max_bytes"; + static final String TOTAL_TIME_SPENT = "total_time_spent"; + static final String TOTAL_TIME_SPENT_IN_MILLIS = "total_time_spent_in_millis"; + static final String PRESSURE = "pressure"; + static final String TOTAL_REJECTIONS = "total_rejections"; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RemoteSegmentStats that = (RemoteSegmentStats) o; + return uploadBytesStarted == that.uploadBytesStarted + && uploadBytesFailed == that.uploadBytesFailed + && uploadBytesSucceeded == that.uploadBytesSucceeded + && downloadBytesStarted == that.downloadBytesStarted + && downloadBytesFailed == that.downloadBytesFailed + && downloadBytesSucceeded == that.downloadBytesSucceeded + && maxRefreshTimeLag == that.maxRefreshTimeLag + && 
maxRefreshBytesLag == that.maxRefreshBytesLag + && totalRefreshBytesLag == that.totalRefreshBytesLag + && totalUploadTime == that.totalUploadTime + && totalDownloadTime == that.totalDownloadTime + && totalRejections == that.totalRejections; + } + + @Override + public int hashCode() { + return Objects.hash( + uploadBytesStarted, + uploadBytesFailed, + uploadBytesSucceeded, + downloadBytesStarted, + downloadBytesFailed, + downloadBytesSucceeded, + maxRefreshTimeLag, + maxRefreshBytesLag, + totalRefreshBytesLag, + totalUploadTime, + totalDownloadTime, + totalRejections + ); } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java index 1531f74597a03..fb65d9ef83be2 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.common.CheckedFunction; import org.opensearch.common.logging.Loggers; -import org.opensearch.common.util.MovingAverage; import org.opensearch.common.util.Streak; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.common.io.stream.StreamInput; @@ -28,8 +27,8 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import static org.opensearch.index.shard.RemoteStoreRefreshListener.EXCLUDE_FILES; @@ -39,15 +38,10 @@ * * @opensearch.internal */ -public class RemoteSegmentTransferTracker { +public class RemoteSegmentTransferTracker extends RemoteTransferTracker { private final Logger logger; - /** - * ShardId for which this instance tracks the remote segment upload metadata. - */ - private final ShardId shardId; - /** * Every refresh is assigned a sequence number. This is the sequence number of the most recent refresh. */ @@ -73,6 +67,12 @@ public class RemoteSegmentTransferTracker { */ private volatile long remoteRefreshTimeMs; + /** + * This is the time of first local refresh after the last successful remote refresh. When the remote store is in + * sync with local refresh, this will be reset to -1. + */ + private volatile long remoteRefreshStartTimeMs = -1; + /** * The refresh time(clock) of most recent remote refresh. */ @@ -83,46 +83,11 @@ public class RemoteSegmentTransferTracker { */ private volatile long refreshSeqNoLag; - /** - * Keeps the time (ms) lag computed so that we do not compute it for every request. - */ - private volatile long timeMsLag; - /** * Keeps track of the total bytes of segment files which were uploaded to remote store during last successful remote refresh */ private volatile long lastSuccessfulRemoteRefreshBytes; - /** - * Cumulative sum of size in bytes of segment files for which upload has started during remote refresh. - */ - private volatile long uploadBytesStarted; - - /** - * Cumulative sum of size in bytes of segment files for which upload has failed during remote refresh. - */ - private volatile long uploadBytesFailed; - - /** - * Cumulative sum of size in bytes of segment files for which upload has succeeded during remote refresh. - */ - private volatile long uploadBytesSucceeded; - - /** - * Cumulative sum of count of remote refreshes that have started. 
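The hunk above changes RemoteSegmentTransferTracker to extend a new RemoteTransferTracker base class (added later in this diff), which absorbs the shared upload accounting so the translog tracker can reuse it. A hypothetical, heavily trimmed sketch of the resulting shape, with names abbreviated and state reduced to a few counters:

import java.util.concurrent.atomic.AtomicLong;

// Trimmed sketch of the refactor's class shape (not the real classes):
// shared upload accounting in a base tracker, store-specific state below it.
abstract class TransferTrackerSketch {
    protected final AtomicLong uploadBytesStarted = new AtomicLong();
    protected final AtomicLong uploadBytesSucceeded = new AtomicLong();
    protected final AtomicLong uploadBytesFailed = new AtomicLong();

    long inflightUploadBytes() {
        return uploadBytesStarted.get() - uploadBytesSucceeded.get() - uploadBytesFailed.get();
    }
}

class SegmentTrackerSketch extends TransferTrackerSketch {
    volatile long maxRefreshBytesLag;                            // segment-only state
}

class TranslogTrackerSketch extends TransferTrackerSketch {
    final AtomicLong downloadBytesSucceeded = new AtomicLong();  // translog-only state
}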
- */ - private volatile long totalUploadsStarted; - - /** - * Cumulative sum of count of remote refreshes that have failed. - */ - private volatile long totalUploadsFailed; - - /** - * Cumulative sum of count of remote refreshes that have succeeded. - */ - private volatile long totalUploadsSucceeded; - /** * Cumulative sum of rejection counts for this shard. */ @@ -154,33 +119,6 @@ public class RemoteSegmentTransferTracker { */ private final Streak failures = new Streak(); - /** - * Provides moving average over the last N total size in bytes of segment files uploaded as part of remote refresh. - * N is window size. Wrapped with {@code AtomicReference} for dynamic changes in window size. - */ - private final AtomicReference uploadBytesMovingAverageReference; - - /** - * This lock object is used for making sure we do not miss any data - */ - private final Object uploadBytesMutex = new Object(); - - /** - * Provides moving average over the last N upload speed (in bytes/s) of segment files uploaded as part of remote refresh. - * N is window size. Wrapped with {@code AtomicReference} for dynamic changes in window size. - */ - private final AtomicReference uploadBytesPerSecMovingAverageReference; - - private final Object uploadBytesPerSecMutex = new Object(); - - /** - * Provides moving average over the last N overall upload time (in nanos) as part of remote refresh.N is window size. - * Wrapped with {@code AtomicReference} for dynamic changes in window size. - */ - private final AtomicReference uploadTimeMsMovingAverageReference; - - private final Object uploadTimeMsMutex = new Object(); - /** * {@link org.opensearch.index.store.Store.StoreDirectory} level file transfer tracker, used to show download stats */ @@ -189,27 +127,36 @@ public class RemoteSegmentTransferTracker { public RemoteSegmentTransferTracker( ShardId shardId, DirectoryFileTransferTracker directoryFileTransferTracker, - int uploadBytesMovingAverageWindowSize, - int uploadBytesPerSecMovingAverageWindowSize, - int uploadTimeMsMovingAverageWindowSize + int movingAverageWindowSize ) { + super(shardId, movingAverageWindowSize); + logger = Loggers.getLogger(getClass(), shardId); - this.shardId = shardId; // Both the local refresh time and remote refresh time are set with current time to give consistent view of time lag when it arises. 
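The failures Streak retained above drives the consecutive-failure backpressure check: incrementTotalUploadsFailed() records true, incrementTotalUploadsSucceeded() records false, and the current run length is read via failures.length(). A minimal sketch with the same observable behavior, assuming only the record/length API that appears in this diff:

// Sketch of a failure-streak counter: successes reset the run length,
// so length() reports the number of consecutive failed remote refreshes.
class StreakSketch {
    private int length;

    synchronized void record(boolean isFailure) {
        length = isFailure ? length + 1 : 0;
    }

    synchronized int length() {
        return length;
    }
}

ConsecutiveFailureValidator can then compare length() against the configured minimum consecutive-failures limit without tracking timestamps at all.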
long currentClockTimeMs = System.currentTimeMillis(); - long currentTimeMs = System.nanoTime() / 1_000_000L; + long currentTimeMs = currentTimeMsUsingSystemNanos(); localRefreshTimeMs = currentTimeMs; remoteRefreshTimeMs = currentTimeMs; + remoteRefreshStartTimeMs = currentTimeMs; localRefreshClockTimeMs = currentClockTimeMs; remoteRefreshClockTimeMs = currentClockTimeMs; - uploadBytesMovingAverageReference = new AtomicReference<>(new MovingAverage(uploadBytesMovingAverageWindowSize)); - uploadBytesPerSecMovingAverageReference = new AtomicReference<>(new MovingAverage(uploadBytesPerSecMovingAverageWindowSize)); - uploadTimeMsMovingAverageReference = new AtomicReference<>(new MovingAverage(uploadTimeMsMovingAverageWindowSize)); this.directoryFileTransferTracker = directoryFileTransferTracker; } - ShardId getShardId() { - return shardId; + public static long currentTimeMsUsingSystemNanos() { + return TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); + } + + @Override + public void incrementTotalUploadsFailed() { + super.incrementTotalUploadsFailed(); + failures.record(true); + } + + @Override + public void incrementTotalUploadsSucceeded() { + super.incrementTotalUploadsSucceeded(); + failures.record(false); } public long getLocalRefreshSeqNo() { @@ -240,19 +187,22 @@ public long getLocalRefreshClockTimeMs() { */ public void updateLocalRefreshTimeAndSeqNo() { updateLocalRefreshClockTimeMs(System.currentTimeMillis()); - updateLocalRefreshTimeMs(System.nanoTime() / 1_000_000L); + updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos()); updateLocalRefreshSeqNo(getLocalRefreshSeqNo() + 1); } // Visible for testing - void updateLocalRefreshTimeMs(long localRefreshTimeMs) { + synchronized void updateLocalRefreshTimeMs(long localRefreshTimeMs) { assert localRefreshTimeMs >= this.localRefreshTimeMs : "newLocalRefreshTimeMs=" + localRefreshTimeMs + " < " + "currentLocalRefreshTimeMs=" + this.localRefreshTimeMs; + boolean isRemoteInSyncBeforeLocalRefresh = this.localRefreshTimeMs == this.remoteRefreshTimeMs; this.localRefreshTimeMs = localRefreshTimeMs; - computeTimeMsLag(); + if (isRemoteInSyncBeforeLocalRefresh) { + this.remoteRefreshStartTimeMs = localRefreshTimeMs; + } } private void updateLocalRefreshClockTimeMs(long localRefreshClockTimeMs) { @@ -281,14 +231,18 @@ long getRemoteRefreshClockTimeMs() { return remoteRefreshClockTimeMs; } - public void updateRemoteRefreshTimeMs(long remoteRefreshTimeMs) { - assert remoteRefreshTimeMs >= this.remoteRefreshTimeMs : "newRemoteRefreshTimeMs=" - + remoteRefreshTimeMs + public synchronized void updateRemoteRefreshTimeMs(long refreshTimeMs) { + assert refreshTimeMs >= this.remoteRefreshTimeMs : "newRemoteRefreshTimeMs=" + + refreshTimeMs + " < " + "currentRemoteRefreshTimeMs=" + this.remoteRefreshTimeMs; - this.remoteRefreshTimeMs = remoteRefreshTimeMs; - computeTimeMsLag(); + this.remoteRefreshTimeMs = refreshTimeMs; + // When multiple refreshes have failed, there is a possibility that retry is ongoing while another refresh gets + // triggered. After the segments have been uploaded and before the below code runs, the updateLocalRefreshTimeAndSeqNo + // method is triggered, which will update the local localRefreshTimeMs. Now, the lag would basically become the + // time since the last refresh happened locally. + this.remoteRefreshStartTimeMs = refreshTimeMs == this.localRefreshTimeMs ? 
-1 : this.localRefreshTimeMs; } public void updateRemoteRefreshClockTimeMs(long remoteRefreshClockTimeMs) { @@ -303,81 +257,31 @@ public long getRefreshSeqNoLag() { return refreshSeqNoLag; } - private void computeTimeMsLag() { - timeMsLag = localRefreshTimeMs - remoteRefreshTimeMs; - } - public long getTimeMsLag() { - return timeMsLag; + if (remoteRefreshTimeMs == localRefreshTimeMs) { + return 0; + } + return currentTimeMsUsingSystemNanos() - remoteRefreshStartTimeMs; } public long getBytesLag() { return bytesLag; } - public long getUploadBytesStarted() { - return uploadBytesStarted; - } - - public void addUploadBytesStarted(long size) { - uploadBytesStarted += size; - } - - public long getUploadBytesFailed() { - return uploadBytesFailed; - } - - public void addUploadBytesFailed(long size) { - uploadBytesFailed += size; - } - - public long getUploadBytesSucceeded() { - return uploadBytesSucceeded; - } - - public void addUploadBytesSucceeded(long size) { - uploadBytesSucceeded += size; - } - public long getInflightUploadBytes() { - return uploadBytesStarted - uploadBytesFailed - uploadBytesSucceeded; - } - - public long getTotalUploadsStarted() { - return totalUploadsStarted; - } - - public void incrementTotalUploadsStarted() { - totalUploadsStarted += 1; - } - - public long getTotalUploadsFailed() { - return totalUploadsFailed; - } - - public void incrementTotalUploadsFailed() { - totalUploadsFailed += 1; - failures.record(true); - } - - public long getTotalUploadsSucceeded() { - return totalUploadsSucceeded; - } - - public void incrementTotalUploadsSucceeded() { - totalUploadsSucceeded += 1; - failures.record(false); + return uploadBytesStarted.get() - uploadBytesFailed.get() - uploadBytesSucceeded.get(); } public long getInflightUploads() { - return totalUploadsStarted - totalUploadsFailed - totalUploadsSucceeded; + return totalUploadsStarted.get() - totalUploadsFailed.get() - totalUploadsSucceeded.get(); } public long getRejectionCount() { return rejectionCount.get(); } - void incrementRejectionCount() { + /** public only for testing **/ + public void incrementRejectionCount() { rejectionCount.incrementAndGet(); } @@ -404,6 +308,11 @@ public void updateLatestLocalFileNameLengthMap( Collection segmentFiles, CheckedFunction fileSizeFunction ) { + logger.debug( + "segmentFilesPostRefresh={} latestLocalFileNamesBeforeMapUpdate={}", + segmentFiles, + latestLocalFileNameLengthMap.keySet() + ); // Update the map segmentFiles.stream() .filter(file -> EXCLUDE_FILES.contains(file) == false) @@ -449,82 +358,6 @@ int getConsecutiveFailureCount() { return failures.length(); } - boolean isUploadBytesAverageReady() { - return uploadBytesMovingAverageReference.get().isReady(); - } - - double getUploadBytesAverage() { - return uploadBytesMovingAverageReference.get().getAverage(); - } - - public void addUploadBytes(long size) { - lastSuccessfulRemoteRefreshBytes = size; - synchronized (uploadBytesMutex) { - this.uploadBytesMovingAverageReference.get().record(size); - } - } - - /** - * Updates the window size for data collection of upload bytes. This also resets any data collected so far. 
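getTimeMsLag() above replaces the previously stored localRefreshTimeMs - remoteRefreshTimeMs difference: lag is zero when the remote and local refresh times match, and otherwise measured live from remoteRefreshStartTimeMs, the first local refresh that put the remote store behind. A standalone model of that state machine, simplified under the assumption that a remote refresh always catches up fully:

import java.util.concurrent.TimeUnit;

// Standalone model of the new lag computation: the lag clock starts at the
// first local refresh that outran the remote store, not at the latest one.
class TimeLagModel {
    private long localRefreshTimeMs;
    private long remoteRefreshTimeMs;
    private long remoteRefreshStartTimeMs = -1;

    static long nowMs() {
        return TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); // monotonic clock
    }

    synchronized void onLocalRefresh() {
        boolean wasInSync = localRefreshTimeMs == remoteRefreshTimeMs;
        localRefreshTimeMs = nowMs();
        if (wasInSync) {
            remoteRefreshStartTimeMs = localRefreshTimeMs; // lag clock starts here
        }
    }

    synchronized void onRemoteRefresh() {
        remoteRefreshTimeMs = localRefreshTimeMs; // simplified: full catch-up
        remoteRefreshStartTimeMs = -1;
    }

    synchronized long timeMsLag() {
        return remoteRefreshTimeMs == localRefreshTimeMs ? 0 : nowMs() - remoteRefreshStartTimeMs;
    }
}

Using System.nanoTime() converted to milliseconds, as currentTimeMsUsingSystemNanos() does above, keeps these comparisons monotonic; System.currentTimeMillis() can jump backwards under clock adjustment and would break the ordering asserts.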
- * - * @param updatedSize the updated size - */ - void updateUploadBytesMovingAverageWindowSize(int updatedSize) { - synchronized (uploadBytesMutex) { - this.uploadBytesMovingAverageReference.set(this.uploadBytesMovingAverageReference.get().copyWithSize(updatedSize)); - } - } - - boolean isUploadBytesPerSecAverageReady() { - return uploadBytesPerSecMovingAverageReference.get().isReady(); - } - - double getUploadBytesPerSecAverage() { - return uploadBytesPerSecMovingAverageReference.get().getAverage(); - } - - public void addUploadBytesPerSec(long bytesPerSec) { - synchronized (uploadBytesPerSecMutex) { - this.uploadBytesPerSecMovingAverageReference.get().record(bytesPerSec); - } - } - - /** - * Updates the window size for data collection of upload bytes per second. This also resets any data collected so far. - * - * @param updatedSize the updated size - */ - void updateUploadBytesPerSecMovingAverageWindowSize(int updatedSize) { - synchronized (uploadBytesPerSecMutex) { - this.uploadBytesPerSecMovingAverageReference.set(this.uploadBytesPerSecMovingAverageReference.get().copyWithSize(updatedSize)); - } - } - - boolean isUploadTimeMsAverageReady() { - return uploadTimeMsMovingAverageReference.get().isReady(); - } - - double getUploadTimeMsAverage() { - return uploadTimeMsMovingAverageReference.get().getAverage(); - } - - public void addUploadTimeMs(long timeMs) { - synchronized (uploadTimeMsMutex) { - this.uploadTimeMsMovingAverageReference.get().record(timeMs); - } - } - - /** - * Updates the window size for data collection of upload time (ms). This also resets any data collected so far. - * - * @param updatedSize the updated size - */ - void updateUploadTimeMsMovingAverageWindowSize(int updatedSize) { - synchronized (uploadTimeMsMutex) { - this.uploadTimeMsMovingAverageReference.set(this.uploadTimeMsMovingAverageReference.get().copyWithSize(updatedSize)); - } - } - public DirectoryFileTransferTracker getDirectoryFileTransferTracker() { return directoryFileTransferTracker; } @@ -534,15 +367,15 @@ public RemoteSegmentTransferTracker.Stats stats() { shardId, localRefreshClockTimeMs, remoteRefreshClockTimeMs, - timeMsLag, + getTimeMsLag(), localRefreshSeqNo, remoteRefreshSeqNo, - uploadBytesStarted, - uploadBytesSucceeded, - uploadBytesFailed, - totalUploadsStarted, - totalUploadsSucceeded, - totalUploadsFailed, + uploadBytesStarted.get(), + uploadBytesSucceeded.get(), + uploadBytesFailed.get(), + totalUploadsStarted.get(), + totalUploadsSucceeded.get(), + totalUploadsFailed.get(), rejectionCount.get(), failures.length(), lastSuccessfulRemoteRefreshBytes, @@ -550,6 +383,7 @@ public RemoteSegmentTransferTracker.Stats stats() { uploadBytesPerSecMovingAverageReference.get().getAverage(), uploadTimeMsMovingAverageReference.get().getAverage(), getBytesLag(), + totalUploadTimeInMillis.get(), directoryFileTransferTracker.stats() ); } @@ -578,6 +412,7 @@ public static class Stats implements Writeable { public final long lastSuccessfulRemoteRefreshBytes; public final double uploadBytesMovingAverage; public final double uploadBytesPerSecMovingAverage; + public final long totalUploadTimeInMs; public final double uploadTimeMovingAverage; public final long bytesLag; public final DirectoryFileTransferTracker.Stats directoryFileTransferTrackerStats; @@ -602,6 +437,7 @@ public Stats( double uploadBytesPerSecMovingAverage, double uploadTimeMovingAverage, long bytesLag, + long totalUploadTimeInMs, DirectoryFileTransferTracker.Stats directoryFileTransferTrackerStats ) { this.shardId = shardId; @@ -623,6 +459,7 
@@ public Stats( this.uploadBytesPerSecMovingAverage = uploadBytesPerSecMovingAverage; this.uploadTimeMovingAverage = uploadTimeMovingAverage; this.bytesLag = bytesLag; + this.totalUploadTimeInMs = totalUploadTimeInMs; this.directoryFileTransferTrackerStats = directoryFileTransferTrackerStats; } @@ -647,6 +484,7 @@ public Stats(StreamInput in) throws IOException { this.uploadBytesPerSecMovingAverage = in.readDouble(); this.uploadTimeMovingAverage = in.readDouble(); this.bytesLag = in.readLong(); + this.totalUploadTimeInMs = in.readLong(); this.directoryFileTransferTrackerStats = in.readOptionalWriteable(DirectoryFileTransferTracker.Stats::new); } catch (IOException e) { throw e; @@ -674,6 +512,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeDouble(uploadBytesPerSecMovingAverage); out.writeDouble(uploadTimeMovingAverage); out.writeLong(bytesLag); + out.writeLong(totalUploadTimeInMs); out.writeOptionalWriteable(directoryFileTransferTrackerStats); } @@ -702,6 +541,7 @@ public boolean equals(Object obj) { && Double.compare(this.uploadBytesPerSecMovingAverage, other.uploadBytesPerSecMovingAverage) == 0 && Double.compare(this.uploadTimeMovingAverage, other.uploadTimeMovingAverage) == 0 && this.bytesLag == other.bytesLag + && this.totalUploadTimeInMs == other.totalUploadTimeInMs && this.directoryFileTransferTrackerStats.equals(other.directoryFileTransferTrackerStats); } @@ -727,6 +567,7 @@ public int hashCode() { uploadBytesPerSecMovingAverage, uploadTimeMovingAverage, bytesLag, + totalUploadTimeInMs, directoryFileTransferTrackerStats ); } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java index 427304935259b..33cd40f802d43 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureService.java @@ -13,32 +13,22 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.shard.IndexEventListener; -import org.opensearch.index.shard.IndexShard; import java.util.Arrays; import java.util.List; import java.util.Locale; -import java.util.Map; -import java.util.function.BiConsumer; /** * Service used to validate if the incoming indexing request should be rejected based on the {@link RemoteSegmentTransferTracker}. * * @opensearch.internal */ -public class RemoteStorePressureService implements IndexEventListener { +public class RemoteStorePressureService { private static final Logger logger = LogManager.getLogger(RemoteStorePressureService.class); - /** - * Keeps map of remote-backed index shards and their corresponding backpressure tracker. - */ - private final Map trackerMap = ConcurrentCollections.newConcurrentMap(); - /** * Remote refresh segment pressure settings which is used for creation of the backpressure tracker and as well as rejection. 
*/ @@ -46,51 +36,21 @@ public class RemoteStorePressureService implements IndexEventListener { private final List lagValidators; + private final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; + @Inject - public RemoteStorePressureService(ClusterService clusterService, Settings settings) { + public RemoteStorePressureService( + ClusterService clusterService, + Settings settings, + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + ) { pressureSettings = new RemoteStorePressureSettings(clusterService, settings, this); lagValidators = Arrays.asList( new ConsecutiveFailureValidator(pressureSettings), new BytesLagValidator(pressureSettings), new TimeLagValidator(pressureSettings) ); - } - - /** - * Get {@code RemoteSegmentTransferTracker} only if the underlying Index has remote segments integration enabled. - * - * @param shardId shard id - * @return the tracker if index is remote-backed, else null. - */ - public RemoteSegmentTransferTracker getRemoteRefreshSegmentTracker(ShardId shardId) { - return trackerMap.get(shardId); - } - - @Override - public void afterIndexShardCreated(IndexShard indexShard) { - if (indexShard.indexSettings().isRemoteStoreEnabled() == false) { - return; - } - ShardId shardId = indexShard.shardId(); - trackerMap.put( - shardId, - new RemoteSegmentTransferTracker( - shardId, - indexShard.store().getDirectoryFileTransferTracker(), - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() - ) - ); - logger.trace("Created tracker for shardId={}", shardId); - } - - @Override - public void afterIndexShardClosed(ShardId shardId, IndexShard indexShard, Settings indexSettings) { - RemoteSegmentTransferTracker remoteSegmentTransferTracker = trackerMap.remove(shardId); - if (remoteSegmentTransferTracker != null) { - logger.trace("Deleted tracker for shardId={}", shardId); - } + this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory; } /** @@ -108,7 +68,7 @@ public boolean isSegmentsUploadBackpressureEnabled() { * @param shardId shardId for which the validation needs to be done. 
     */
    public void validateSegmentsUploadLag(ShardId shardId) {
-        RemoteSegmentTransferTracker remoteSegmentTransferTracker = getRemoteRefreshSegmentTracker(shardId);
+        RemoteSegmentTransferTracker remoteSegmentTransferTracker = remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId);
         // condition 1 - This will be null for non-remote backed indexes
         // condition 2 - This will be zero if the remote store is in sync with the local store
         if (remoteSegmentTransferTracker == null || remoteSegmentTransferTracker.getRefreshSeqNoLag() == 0) {
@@ -123,22 +83,6 @@ public void validateSegmentsUploadLag(ShardId shardId) {
         }
     }
 
-    void updateUploadBytesMovingAverageWindowSize(int updatedSize) {
-        updateMovingAverageWindowSize(RemoteSegmentTransferTracker::updateUploadBytesMovingAverageWindowSize, updatedSize);
-    }
-
-    void updateUploadBytesPerSecMovingAverageWindowSize(int updatedSize) {
-        updateMovingAverageWindowSize(RemoteSegmentTransferTracker::updateUploadBytesPerSecMovingAverageWindowSize, updatedSize);
-    }
-
-    void updateUploadTimeMsMovingAverageWindowSize(int updatedSize) {
-        updateMovingAverageWindowSize(RemoteSegmentTransferTracker::updateUploadTimeMsMovingAverageWindowSize, updatedSize);
-    }
-
-    void updateMovingAverageWindowSize(BiConsumer<RemoteSegmentTransferTracker, Integer> biConsumer, int updatedSize) {
-        trackerMap.values().forEach(tracker -> biConsumer.accept(tracker, updatedSize));
-    }
-
     /**
      * Abstract class for validating if lag is acceptable or not.
      *
@@ -189,18 +133,18 @@ public boolean validate(RemoteSegmentTransferTracker pressureTracker, ShardId sh
         if (pressureTracker.getRefreshSeqNoLag() <= 1) {
             return true;
         }
-        if (pressureTracker.isUploadBytesAverageReady() == false) {
+        if (pressureTracker.isUploadBytesMovingAverageReady() == false) {
             logger.trace("upload bytes moving average is not ready");
             return true;
         }
-        double dynamicBytesLagThreshold = pressureTracker.getUploadBytesAverage() * pressureSettings.getBytesLagVarianceFactor();
+        double dynamicBytesLagThreshold = pressureTracker.getUploadBytesMovingAverage() * pressureSettings.getBytesLagVarianceFactor();
         long bytesLag = pressureTracker.getBytesLag();
         return bytesLag <= dynamicBytesLagThreshold;
     }
 
     @Override
     public String rejectionMessage(RemoteSegmentTransferTracker pressureTracker, ShardId shardId) {
-        double dynamicBytesLagThreshold = pressureTracker.getUploadBytesAverage() * pressureSettings.getBytesLagVarianceFactor();
+        double dynamicBytesLagThreshold = pressureTracker.getUploadBytesMovingAverage() * pressureSettings.getBytesLagVarianceFactor();
         return String.format(
             Locale.ROOT,
             "rejected execution on primary shard:%s due to remote segments lagging behind local segments."
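BytesLagValidator above computes its rejection threshold dynamically: the moving average of bytes uploaded per remote refresh, scaled by the bytes-lag variance factor (default 10.0 per the RemoteStorePressureSettings defaults shown in this diff). A small worked example with made-up numbers:

// Worked illustration of the dynamic bytes-lag threshold; the byte counts
// are invented for the example, only the formula comes from the diff.
class BytesLagThresholdExample {
    public static void main(String[] args) {
        double uploadBytesMovingAverage = 50L * 1024 * 1024; // ~50 MB per refresh, on average
        double bytesLagVarianceFactor = 10.0;                // default setting value
        long bytesLag = 600L * 1024 * 1024;                  // local store is 600 MB ahead

        double dynamicBytesLagThreshold = uploadBytesMovingAverage * bytesLagVarianceFactor;
        boolean accept = bytesLag <= dynamicBytesLagThreshold;
        // threshold=524288000 (~500 MB), accept=false -> indexing is rejected
        System.out.println("threshold=" + (long) dynamicBytesLagThreshold + " accept=" + accept);
    }
}

Scaling the threshold off recent upload sizes means a shard doing large refreshes tolerates proportionally more lag before backpressure kicks in, rather than tripping a fixed byte limit.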
@@ -235,18 +179,19 @@ public boolean validate(RemoteSegmentTransferTracker pressureTracker, ShardId sh if (pressureTracker.getRefreshSeqNoLag() <= 1) { return true; } - if (pressureTracker.isUploadTimeMsAverageReady() == false) { - logger.trace("upload time moving average is not ready"); + if (pressureTracker.isUploadTimeMovingAverageReady() == false) { return true; } long timeLag = pressureTracker.getTimeMsLag(); - double dynamicTimeLagThreshold = pressureTracker.getUploadTimeMsAverage() * pressureSettings.getUploadTimeLagVarianceFactor(); + double dynamicTimeLagThreshold = pressureTracker.getUploadTimeMovingAverage() * pressureSettings + .getUploadTimeLagVarianceFactor(); return timeLag <= dynamicTimeLagThreshold; } @Override public String rejectionMessage(RemoteSegmentTransferTracker pressureTracker, ShardId shardId) { - double dynamicTimeLagThreshold = pressureTracker.getUploadTimeMsAverage() * pressureSettings.getUploadTimeLagVarianceFactor(); + double dynamicTimeLagThreshold = pressureTracker.getUploadTimeMovingAverage() * pressureSettings + .getUploadTimeLagVarianceFactor(); return String.format( Locale.ROOT, "rejected execution on primary shard:%s due to remote segments lagging behind local segments." diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureSettings.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureSettings.java index 3f665890d43e9..e66aa3444c214 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureSettings.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePressureSettings.java @@ -20,21 +20,17 @@ */ public class RemoteStorePressureSettings { - private static class Defaults { + static class Defaults { private static final double BYTES_LAG_VARIANCE_FACTOR = 10.0; private static final double UPLOAD_TIME_LAG_VARIANCE_FACTOR = 10.0; private static final double VARIANCE_FACTOR_MIN_VALUE = 1.0; private static final int MIN_CONSECUTIVE_FAILURES_LIMIT = 5; private static final int MIN_CONSECUTIVE_FAILURES_LIMIT_MIN_VALUE = 1; - private static final int UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE = 20; - private static final int UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE = 20; - private static final int UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE = 20; - private static final int MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE = 5; } public static final Setting REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED = Setting.boolSetting( "remote_store.segment.pressure.enabled", - false, + true, Setting.Property.Dynamic, Setting.Property.NodeScope ); @@ -63,30 +59,6 @@ private static class Defaults { Setting.Property.NodeScope ); - public static final Setting UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE = Setting.intSetting( - "remote_store.segment.pressure.upload_bytes_moving_average_window_size", - Defaults.UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE, - Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - public static final Setting UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE = Setting.intSetting( - "remote_store.segment.pressure.upload_bytes_per_sec_moving_average_window_size", - Defaults.UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE, - Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - public static final Setting UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE = Setting.intSetting( - "remote_store.segment.pressure.upload_time_moving_average_window_size", - 
Defaults.UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE, - Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - private volatile boolean remoteRefreshSegmentPressureEnabled; private volatile long minRefreshSeqNoLagLimit; @@ -97,16 +69,10 @@ private static class Defaults { private volatile int minConsecutiveFailuresLimit; - private volatile int uploadBytesMovingAverageWindowSize; - - private volatile int uploadBytesPerSecMovingAverageWindowSize; - - private volatile int uploadTimeMovingAverageWindowSize; - public RemoteStorePressureSettings( ClusterService clusterService, Settings settings, - RemoteStorePressureService remoteUploadPressureService + RemoteStorePressureService remoteStorePressureService ) { ClusterSettings clusterSettings = clusterService.getClusterSettings(); @@ -121,93 +87,45 @@ public RemoteStorePressureSettings( this.minConsecutiveFailuresLimit = MIN_CONSECUTIVE_FAILURES_LIMIT.get(settings); clusterSettings.addSettingsUpdateConsumer(MIN_CONSECUTIVE_FAILURES_LIMIT, this::setMinConsecutiveFailuresLimit); - - this.uploadBytesMovingAverageWindowSize = UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE.get(settings); - clusterSettings.addSettingsUpdateConsumer( - UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE, - remoteUploadPressureService::updateUploadBytesMovingAverageWindowSize - ); - clusterSettings.addSettingsUpdateConsumer(UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE, this::setUploadBytesMovingAverageWindowSize); - - this.uploadBytesPerSecMovingAverageWindowSize = UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE.get(settings); - clusterSettings.addSettingsUpdateConsumer( - UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE, - remoteUploadPressureService::updateUploadBytesPerSecMovingAverageWindowSize - ); - clusterSettings.addSettingsUpdateConsumer( - UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE, - this::setUploadBytesPerSecMovingAverageWindowSize - ); - - this.uploadTimeMovingAverageWindowSize = UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE.get(settings); - clusterSettings.addSettingsUpdateConsumer( - UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE, - remoteUploadPressureService::updateUploadTimeMsMovingAverageWindowSize - ); - clusterSettings.addSettingsUpdateConsumer(UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE, this::setUploadTimeMovingAverageWindowSize); } - public boolean isRemoteRefreshSegmentPressureEnabled() { + boolean isRemoteRefreshSegmentPressureEnabled() { return remoteRefreshSegmentPressureEnabled; } - public void setRemoteRefreshSegmentPressureEnabled(boolean remoteRefreshSegmentPressureEnabled) { + private void setRemoteRefreshSegmentPressureEnabled(boolean remoteRefreshSegmentPressureEnabled) { this.remoteRefreshSegmentPressureEnabled = remoteRefreshSegmentPressureEnabled; } - public long getMinRefreshSeqNoLagLimit() { + long getMinRefreshSeqNoLagLimit() { return minRefreshSeqNoLagLimit; } - public void setMinRefreshSeqNoLagLimit(long minRefreshSeqNoLagLimit) { + private void setMinRefreshSeqNoLagLimit(long minRefreshSeqNoLagLimit) { this.minRefreshSeqNoLagLimit = minRefreshSeqNoLagLimit; } - public double getBytesLagVarianceFactor() { + double getBytesLagVarianceFactor() { return bytesLagVarianceFactor; } - public void setBytesLagVarianceFactor(double bytesLagVarianceFactor) { + private void setBytesLagVarianceFactor(double bytesLagVarianceFactor) { this.bytesLagVarianceFactor = bytesLagVarianceFactor; } - public double getUploadTimeLagVarianceFactor() { + double getUploadTimeLagVarianceFactor() { return uploadTimeLagVarianceFactor; } - public void 
setUploadTimeLagVarianceFactor(double uploadTimeLagVarianceFactor) { + private void setUploadTimeLagVarianceFactor(double uploadTimeLagVarianceFactor) { this.uploadTimeLagVarianceFactor = uploadTimeLagVarianceFactor; } - public int getMinConsecutiveFailuresLimit() { + int getMinConsecutiveFailuresLimit() { return minConsecutiveFailuresLimit; } - public void setMinConsecutiveFailuresLimit(int minConsecutiveFailuresLimit) { + private void setMinConsecutiveFailuresLimit(int minConsecutiveFailuresLimit) { this.minConsecutiveFailuresLimit = minConsecutiveFailuresLimit; } - - public int getUploadBytesMovingAverageWindowSize() { - return uploadBytesMovingAverageWindowSize; - } - - public void setUploadBytesMovingAverageWindowSize(int uploadBytesMovingAverageWindowSize) { - this.uploadBytesMovingAverageWindowSize = uploadBytesMovingAverageWindowSize; - } - - public int getUploadBytesPerSecMovingAverageWindowSize() { - return uploadBytesPerSecMovingAverageWindowSize; - } - - public void setUploadBytesPerSecMovingAverageWindowSize(int uploadBytesPerSecMovingAverageWindowSize) { - this.uploadBytesPerSecMovingAverageWindowSize = uploadBytesPerSecMovingAverageWindowSize; - } - - public int getUploadTimeMovingAverageWindowSize() { - return uploadTimeMovingAverageWindowSize; - } - - public void setUploadTimeMovingAverageWindowSize(int uploadTimeMovingAverageWindowSize) { - this.uploadTimeMovingAverageWindowSize = uploadTimeMovingAverageWindowSize; - } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java new file mode 100644 index 0000000000000..6058fcc7345d2 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java @@ -0,0 +1,115 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexEventListener; +import org.opensearch.index.shard.IndexShard; + +import java.util.Map; + +/** + * Factory to manage stats trackers for Remote Store operations + * + * @opensearch.internal + */ +public class RemoteStoreStatsTrackerFactory implements IndexEventListener { + static class Defaults { + static final int MOVING_AVERAGE_WINDOW_SIZE = 20; + static final int MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE = 5; + } + + public static final Setting MOVING_AVERAGE_WINDOW_SIZE = Setting.intSetting( + "remote_store.moving_average_window_size", + Defaults.MOVING_AVERAGE_WINDOW_SIZE, + Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + private static final Logger logger = LogManager.getLogger(RemoteStoreStatsTrackerFactory.class); + + /** + * Number of data points to consider for a moving average statistic + */ + private volatile int movingAverageWindowSize; + + /** + * Keeps map of remote-backed index shards and their corresponding stats tracker. 
+     */
+    private final Map<ShardId, RemoteSegmentTransferTracker> remoteSegmentTrackerMap = ConcurrentCollections.newConcurrentMap();
+
+    /**
+     * Keeps map of remote-backed index shards and their corresponding translog stats tracker.
+     */
+    private final Map<ShardId, RemoteTranslogTransferTracker> remoteTranslogTrackerMap = ConcurrentCollections.newConcurrentMap();
+
+    public RemoteStoreStatsTrackerFactory(ClusterService clusterService, Settings settings) {
+        ClusterSettings clusterSettings = clusterService.getClusterSettings();
+
+        this.movingAverageWindowSize = MOVING_AVERAGE_WINDOW_SIZE.get(settings);
+        clusterSettings.addSettingsUpdateConsumer(MOVING_AVERAGE_WINDOW_SIZE, this::updateMovingAverageWindowSize);
+    }
+
+    @Override
+    public void afterIndexShardCreated(IndexShard indexShard) {
+        if (indexShard.indexSettings().isRemoteStoreEnabled() == false) {
+            return;
+        }
+        ShardId shardId = indexShard.shardId();
+        remoteSegmentTrackerMap.put(
+            shardId,
+            new RemoteSegmentTransferTracker(shardId, indexShard.store().getDirectoryFileTransferTracker(), movingAverageWindowSize)
+        );
+        logger.trace("Created RemoteSegmentTransferTracker for shardId={}", shardId);
+        remoteTranslogTrackerMap.put(shardId, new RemoteTranslogTransferTracker(shardId, movingAverageWindowSize));
+        logger.trace("Created RemoteTranslogTransferTracker for shardId={}", shardId);
+    }
+
+    @Override
+    public void afterIndexShardClosed(ShardId shardId, IndexShard indexShard, Settings indexSettings) {
+        RemoteSegmentTransferTracker remoteSegmentTransferTracker = remoteSegmentTrackerMap.remove(shardId);
+        if (remoteSegmentTransferTracker != null) {
+            logger.trace("Deleted RemoteSegmentTransferTracker for shardId={}", shardId);
+        }
+
+        RemoteTranslogTransferTracker remoteTranslogTransferTracker = remoteTranslogTrackerMap.remove(shardId);
+        if (remoteTranslogTransferTracker != null) {
+            logger.trace("Deleted RemoteTranslogTransferTracker for shardId={}", shardId);
+        }
+    }
+
+    private void updateMovingAverageWindowSize(int updatedSize) {
+        remoteSegmentTrackerMap.values().forEach(tracker -> tracker.updateMovingAverageWindowSize(updatedSize));
+        remoteTranslogTrackerMap.values().forEach(tracker -> tracker.updateMovingAverageWindowSize(updatedSize));
+
+        // Update movingAverageWindowSize only if the trackers were successfully updated
+        movingAverageWindowSize = updatedSize;
+    }
+
+    public RemoteSegmentTransferTracker getRemoteSegmentTransferTracker(ShardId shardId) {
+        return remoteSegmentTrackerMap.get(shardId);
+    }
+
+    public RemoteTranslogTransferTracker getRemoteTranslogTransferTracker(ShardId shardId) {
+        return remoteTranslogTrackerMap.get(shardId);
+    }
+
+    // visible for testing
+    int getMovingAverageWindowSize() {
+        return movingAverageWindowSize;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java
index 114d07589b0c0..b4c33d781af86 100644
--- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java
+++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java
@@ -8,7 +8,13 @@
 package org.opensearch.index.remote;
 
+import org.opensearch.common.collect.Tuple;
+
 import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
 
 /**
  * Utils for remote store
@@ -69,4 +75,30 @@ public static String getSegmentName(String filename) {
 
         return filename.substring(0, endIdx);
     }
+
+    /**
+     *
+     * @param mdFiles List of segment/translog metadata files
+     * @param fn Function to extract PrimaryTerm_Generation and Node Id from a metadata file name.
+     *           fn returns null if node id is not part of the file name
+     */
+    public static void verifyNoMultipleWriters(List<String> mdFiles, Function<String, Tuple<String, String>> fn) {
+        Map<String, String> nodesByPrimaryTermAndGen = new HashMap<>();
+        mdFiles.forEach(mdFile -> {
+            Tuple<String, String> nodeIdByPrimaryTermAndGen = fn.apply(mdFile);
+            if (nodeIdByPrimaryTermAndGen != null) {
+                if (nodesByPrimaryTermAndGen.containsKey(nodeIdByPrimaryTermAndGen.v1())
+                    && (!nodesByPrimaryTermAndGen.get(nodeIdByPrimaryTermAndGen.v1()).equals(nodeIdByPrimaryTermAndGen.v2()))) {
+                    throw new IllegalStateException(
+                        "Multiple metadata files from different nodes"
+                            + " having the same primary term and generation "
+                            + nodeIdByPrimaryTermAndGen.v1()
+                            + " detected "
+                    );
+                }
+                nodesByPrimaryTermAndGen.put(nodeIdByPrimaryTermAndGen.v1(), nodeIdByPrimaryTermAndGen.v2());
+            }
+        });
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteTransferTracker.java
new file mode 100644
index 0000000000000..cbae4931b7001
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/remote/RemoteTransferTracker.java
@@ -0,0 +1,269 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.remote;
+
+import org.opensearch.common.util.MovingAverage;
+import org.opensearch.core.index.shard.ShardId;
+
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * Base class for remote store stats trackers
+ *
+ * @opensearch.internal
+ */
+public abstract class RemoteTransferTracker {
+    /**
+     * The shard that this tracker is associated with
+     */
+    protected final ShardId shardId;
+
+    /**
+     * Total time spent on Remote Store uploads.
+     */
+    protected final AtomicLong totalUploadTimeInMillis;
+
+    /**
+     * Total number of Remote Store uploads that have been started.
+     */
+    protected final AtomicLong totalUploadsStarted;
+
+    /**
+     * Total number of Remote Store uploads that have failed.
+     */
+    protected final AtomicLong totalUploadsFailed;
+
+    /**
+     * Total number of Remote Store uploads that have been successful.
+     */
+    protected final AtomicLong totalUploadsSucceeded;
+
+    /**
+     * Total number of byte uploads to Remote Store that have been started.
+     */
+    protected final AtomicLong uploadBytesStarted;
+
+    /**
+     * Total number of byte uploads to Remote Store that have failed.
+     */
+    protected final AtomicLong uploadBytesFailed;
+
+    /**
+     * Total number of byte uploads to Remote Store that have been successful.
+     */
+    protected final AtomicLong uploadBytesSucceeded;
+
+    /**
+     * Provides moving average over the last N total size in bytes of files uploaded as part of Remote Store upload.
+     * N is window size. Wrapped with {@code AtomicReference} for dynamic changes in window size.
+     */
+    protected final AtomicReference<MovingAverage> uploadBytesMovingAverageReference;
+
+    /**
+     * This lock object is used for making sure we do not miss any data.
+     */
+    protected final Object uploadBytesMutex;
+
+    /**
+     * Provides moving average over the last N upload speed (in bytes/s) of files uploaded as part of Remote Store upload.
+     * N is window size. Wrapped with {@code AtomicReference} for dynamic changes in window size.
+     */
+    protected final AtomicReference<MovingAverage> uploadBytesPerSecMovingAverageReference;
+
+    /**
+     * This lock object is used for making sure we do not miss any data.
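verifyNoMultipleWriters above is parameterized by a parse function so the segment and translog stores can share the same check. A hypothetical usage sketch follows; the "md__<primaryTerm>_<generation>__<nodeId>" file-name layout and the inlined conflict check are invented stand-ins for the real metadata naming and the helper itself:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

// Hypothetical illustration of the multiple-writers check. Two metadata
// files carry the same primaryTerm_generation but different node ids,
// which indicates two primaries wrote to the remote store concurrently.
class MultipleWritersCheckExample {
    public static void main(String[] args) {
        Function<String, String[]> fn = name -> {
            String[] parts = name.split("__");
            // parts[1] = primaryTerm_generation, parts[2] = nodeId (invented layout)
            return parts.length == 3 ? new String[] { parts[1], parts[2] } : null;
        };

        List<String> mdFiles = List.of("md__5_12__nodeA", "md__5_12__nodeB");
        Map<String, String> nodesByPrimaryTermAndGen = new HashMap<>();
        for (String mdFile : mdFiles) {
            String[] termGenAndNode = fn.apply(mdFile);
            if (termGenAndNode == null) {
                continue;
            }
            String previousNode = nodesByPrimaryTermAndGen.putIfAbsent(termGenAndNode[0], termGenAndNode[1]);
            if (previousNode != null && previousNode.equals(termGenAndNode[1]) == false) {
                throw new IllegalStateException("Multiple writers detected for " + termGenAndNode[0]);
            }
        }
    }
}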
+ */ + protected final Object uploadBytesPerSecMutex; + + /** + * Provides moving average over the last N overall upload time (in nanos) as part of Remote Store upload. N is window size. + * Wrapped with {@code AtomicReference} for dynamic changes in window size. + */ + protected final AtomicReference uploadTimeMsMovingAverageReference; + + /** + * This lock object is used for making sure we do not miss any data. + */ + protected final Object uploadTimeMsMutex; + + public RemoteTransferTracker(ShardId shardId, int movingAverageWindowSize) { + this.shardId = shardId; + totalUploadTimeInMillis = new AtomicLong(0); + totalUploadsStarted = new AtomicLong(0); + totalUploadsFailed = new AtomicLong(0); + totalUploadsSucceeded = new AtomicLong(0); + uploadBytesStarted = new AtomicLong(0); + uploadBytesFailed = new AtomicLong(0); + uploadBytesSucceeded = new AtomicLong(0); + uploadBytesMutex = new Object(); + uploadBytesMovingAverageReference = new AtomicReference<>(new MovingAverage(movingAverageWindowSize)); + uploadBytesPerSecMutex = new Object(); + uploadBytesPerSecMovingAverageReference = new AtomicReference<>(new MovingAverage(movingAverageWindowSize)); + uploadTimeMsMutex = new Object(); + uploadTimeMsMovingAverageReference = new AtomicReference<>(new MovingAverage(movingAverageWindowSize)); + } + + ShardId getShardId() { + return shardId; + } + + public long getTotalUploadTimeInMillis() { + return totalUploadTimeInMillis.get(); + } + + public void addUploadTimeInMillis(long duration) { + totalUploadTimeInMillis.addAndGet(duration); + } + + public long getTotalUploadsStarted() { + return totalUploadsStarted.get(); + } + + public long getTotalUploadsFailed() { + return totalUploadsFailed.get(); + } + + public long getTotalUploadsSucceeded() { + return totalUploadsSucceeded.get(); + } + + public void incrementTotalUploadsStarted() { + totalUploadsStarted.addAndGet(1); + } + + public void incrementTotalUploadsFailed() { + checkTotal(totalUploadsStarted.get(), totalUploadsFailed.get(), totalUploadsSucceeded.get(), 1); + totalUploadsFailed.addAndGet(1); + } + + public void incrementTotalUploadsSucceeded() { + checkTotal(totalUploadsStarted.get(), totalUploadsFailed.get(), totalUploadsSucceeded.get(), 1); + totalUploadsSucceeded.addAndGet(1); + } + + public long getUploadBytesStarted() { + return uploadBytesStarted.get(); + } + + public long getUploadBytesFailed() { + return uploadBytesFailed.get(); + } + + public long getUploadBytesSucceeded() { + return uploadBytesSucceeded.get(); + } + + public void addUploadBytesStarted(long count) { + uploadBytesStarted.addAndGet(count); + } + + public void addUploadBytesFailed(long count) { + checkTotal(uploadBytesStarted.get(), uploadBytesFailed.get(), uploadBytesSucceeded.get(), count); + uploadBytesFailed.addAndGet(count); + } + + public void addUploadBytesSucceeded(long count) { + checkTotal(uploadBytesStarted.get(), uploadBytesFailed.get(), uploadBytesSucceeded.get(), count); + uploadBytesSucceeded.addAndGet(count); + } + + boolean isUploadBytesMovingAverageReady() { + return uploadBytesMovingAverageReference.get().isReady(); + } + + double getUploadBytesMovingAverage() { + return uploadBytesMovingAverageReference.get().getAverage(); + } + + public void updateUploadBytesMovingAverage(long count) { + updateMovingAverage(count, uploadBytesMutex, uploadBytesMovingAverageReference); + } + + boolean isUploadBytesPerSecMovingAverageReady() { + return uploadBytesPerSecMovingAverageReference.get().isReady(); + } + + double getUploadBytesPerSecMovingAverage() { 
+ return uploadBytesPerSecMovingAverageReference.get().getAverage(); + } + + public void updateUploadBytesPerSecMovingAverage(long speed) { + updateMovingAverage(speed, uploadBytesPerSecMutex, uploadBytesPerSecMovingAverageReference); + } + + boolean isUploadTimeMovingAverageReady() { + return uploadTimeMsMovingAverageReference.get().isReady(); + } + + double getUploadTimeMovingAverage() { + return uploadTimeMsMovingAverageReference.get().getAverage(); + } + + public void updateUploadTimeMovingAverage(long duration) { + updateMovingAverage(duration, uploadTimeMsMutex, uploadTimeMsMovingAverageReference); + } + + /** + * Records a new data point for a moving average stat + * + * @param value The new data point to be added + * @param mutex The mutex to use for the update + * @param movingAverageReference The atomic reference to be updated + */ + protected void updateMovingAverage(long value, Object mutex, AtomicReference movingAverageReference) { + synchronized (mutex) { + movingAverageReference.get().record(value); + } + } + + /** + * Updates the window size for data collection. This also resets any data collected so far. + * + * @param updatedSize The updated size + */ + void updateMovingAverageWindowSize(int updatedSize) { + updateMovingAverageWindowSize(updatedSize, uploadBytesMutex, uploadBytesMovingAverageReference); + updateMovingAverageWindowSize(updatedSize, uploadBytesPerSecMutex, uploadBytesPerSecMovingAverageReference); + updateMovingAverageWindowSize(updatedSize, uploadTimeMsMutex, uploadTimeMsMovingAverageReference); + } + + /** + * Updates the window size for data collection. This also resets any data collected so far. + * + * @param updatedSize The updated size + * @param mutex The mutex to use for the update + * @param movingAverageReference The atomic reference to be updated + */ + protected void updateMovingAverageWindowSize(int updatedSize, Object mutex, AtomicReference movingAverageReference) { + synchronized (mutex) { + movingAverageReference.set(movingAverageReference.get().copyWithSize(updatedSize)); + } + } + + /** + * Validates that the sum of successful operations, failed operations, and the number of operations to add (irrespective of failed/successful) does not exceed the number of operations originally started + * @param startedCount Number of operations started + * @param failedCount Number of operations failed + * @param succeededCount Number of operations successful + * @param countToAdd Number of operations to add + */ + private void checkTotal(long startedCount, long failedCount, long succeededCount, long countToAdd) { + long delta = startedCount - (failedCount + succeededCount + countToAdd); + assert delta >= 0 : "Sum of failure count (" + + failedCount + + "), success count (" + + succeededCount + + "), and count to add (" + + countToAdd + + ") cannot exceed started count (" + + startedCount + + ")"; + } +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java new file mode 100644 index 0000000000000..4214a87049350 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteTranslogTransferTracker.java @@ -0,0 +1,545 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
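The checkTotal guard above asserts that failed operations, successful operations, and the increment being applied never add up to more than the operations started, catching double counting in the additive trackers. A minimal standalone demonstration of the invariant (run with -ea so the assert fires):

import java.util.concurrent.atomic.AtomicLong;

// Minimal demonstration of the checkTotal invariant: completed work
// (failed + succeeded + the pending increment) must never exceed started work.
class CheckTotalExample {
    static final AtomicLong started = new AtomicLong();
    static final AtomicLong failed = new AtomicLong();
    static final AtomicLong succeeded = new AtomicLong();

    static void addSucceeded(long count) {
        long delta = started.get() - (failed.get() + succeeded.get() + count);
        assert delta >= 0 : "completed count would exceed started count";
        succeeded.addAndGet(count);
    }

    public static void main(String[] args) {
        started.addAndGet(100);
        addSucceeded(60);   // fine: 60 <= 100
        addSucceeded(50);   // trips the assert: 60 + 50 > 100
    }
}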
+ */ + +package org.opensearch.index.remote; + +import org.opensearch.common.util.MovingAverage; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; + +import java.io.IOException; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Stores Remote Translog Store-related stats for a given IndexShard. + * + * @opensearch.internal + */ +public class RemoteTranslogTransferTracker extends RemoteTransferTracker { + /** + * Epoch timestamp of the last successful Remote Translog Store upload. + */ + private final AtomicLong lastSuccessfulUploadTimestamp; + + /** + * Epoch timestamp of the last successful Remote Translog Store download. + */ + private final AtomicLong lastSuccessfulDownloadTimestamp; + + /** + * Total number of Remote Translog Store downloads that have been successful. + */ + private final AtomicLong totalDownloadsSucceeded; + + /** + * Total number of byte downloads to Remote Translog Store that have been successful. + */ + private final AtomicLong downloadBytesSucceeded; + + /** + * Total time spent on Remote Translog Store downloads. + */ + private final AtomicLong totalDownloadTimeInMillis; + + /** + * Provides moving average over the last N total size in bytes of translog files downloaded as part of Remote Translog Store download. + * N is window size. Wrapped with {@code AtomicReference} for dynamic changes in window size. + */ + private final AtomicReference downloadBytesMovingAverageReference; + + /** + * This lock object is used for making sure we do not miss any data. + */ + private final Object downloadBytesMutex; + + /** + * Provides moving average over the last N download speed (in bytes/s) of translog files downloaded as part of Remote Translog Store download. + * N is window size. Wrapped with {@code AtomicReference} for dynamic changes in window size. + */ + private final AtomicReference downloadBytesPerSecMovingAverageReference; + + /** + * This lock object is used for making sure we do not miss any data. + */ + private final Object downloadBytesPerSecMutex; + + /** + * Provides moving average over the last N overall download time (in nanos) as part of Remote Translog Store download. N is window size. + * Wrapped with {@code AtomicReference} for dynamic changes in window size. + */ + private final AtomicReference downloadTimeMsMovingAverageReference; + + /** + * This lock object is used for making sure we do not miss any data. 
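The download-side counters declared in this class are consumed by recordDownloadStats(), defined further down, which turns before/after deltas into moving-average samples and only records a bytes-per-second value when the elapsed time is non-zero. A hedged usage sketch; downloadWithStats and the elided download loop are hypothetical, and only the tracker methods come from this diff:

import org.opensearch.index.remote.RemoteTranslogTransferTracker;

// Sketch of the intended calling pattern: snapshot the counters, perform the
// download (which feeds the same counters), then derive per-download deltas.
final class TranslogDownloadStatsUsage {
    static void downloadWithStats(RemoteTranslogTransferTracker tracker) {
        long prevBytes = tracker.getDownloadBytesSucceeded();
        long prevTimeMs = tracker.getTotalDownloadTimeInMillis();

        // ... fetch translog files, calling tracker.addDownloadBytesSucceeded(n)
        // and tracker.addDownloadTimeInMillis(elapsedMs) along the way ...

        tracker.recordDownloadStats(prevBytes, prevTimeMs);
    }
}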
+ */ + private final Object downloadTimeMsMutex; + + public RemoteTranslogTransferTracker(ShardId shardId, int movingAverageWindowSize) { + super(shardId, movingAverageWindowSize); + + lastSuccessfulUploadTimestamp = new AtomicLong(0); + lastSuccessfulDownloadTimestamp = new AtomicLong(0); + totalDownloadsSucceeded = new AtomicLong(0); + downloadBytesSucceeded = new AtomicLong(0); + totalDownloadTimeInMillis = new AtomicLong(0); + downloadBytesMutex = new Object(); + downloadBytesMovingAverageReference = new AtomicReference<>(new MovingAverage(movingAverageWindowSize)); + downloadBytesPerSecMutex = new Object(); + downloadBytesPerSecMovingAverageReference = new AtomicReference<>(new MovingAverage(movingAverageWindowSize)); + downloadTimeMsMutex = new Object(); + downloadTimeMsMovingAverageReference = new AtomicReference<>(new MovingAverage(movingAverageWindowSize)); + } + + public long getLastSuccessfulUploadTimestamp() { + return lastSuccessfulUploadTimestamp.get(); + } + + public void setLastSuccessfulUploadTimestamp(long lastSuccessfulUploadTimestamp) { + this.lastSuccessfulUploadTimestamp.set(lastSuccessfulUploadTimestamp); + } + + /** + * Updates the window size for data collection. This also resets any data collected so far. + * + * @param updatedSize the updated size + */ + void updateMovingAverageWindowSize(int updatedSize) { + super.updateMovingAverageWindowSize(updatedSize); + updateMovingAverageWindowSize(updatedSize, downloadBytesMutex, downloadBytesMovingAverageReference); + updateMovingAverageWindowSize(updatedSize, downloadBytesPerSecMutex, downloadBytesPerSecMovingAverageReference); + updateMovingAverageWindowSize(updatedSize, downloadTimeMsMutex, downloadTimeMsMovingAverageReference); + } + + public long getTotalDownloadsSucceeded() { + return totalDownloadsSucceeded.get(); + } + + void incrementDownloadsSucceeded() { + totalDownloadsSucceeded.addAndGet(1); + } + + public long getDownloadBytesSucceeded() { + return downloadBytesSucceeded.get(); + } + + public void addDownloadBytesSucceeded(long count) { + downloadBytesSucceeded.addAndGet(count); + } + + public long getTotalDownloadTimeInMillis() { + return totalDownloadTimeInMillis.get(); + } + + public void addDownloadTimeInMillis(long duration) { + totalDownloadTimeInMillis.addAndGet(duration); + } + + public long getLastSuccessfulDownloadTimestamp() { + return lastSuccessfulDownloadTimestamp.get(); + } + + void setLastSuccessfulDownloadTimestamp(long lastSuccessfulDownloadTimestamp) { + this.lastSuccessfulDownloadTimestamp.set(lastSuccessfulDownloadTimestamp); + } + + boolean isDownloadBytesMovingAverageReady() { + return downloadBytesMovingAverageReference.get().isReady(); + } + + double getDownloadBytesMovingAverage() { + return downloadBytesMovingAverageReference.get().getAverage(); + } + + void updateDownloadBytesMovingAverage(long count) { + updateMovingAverage(count, downloadBytesMutex, downloadBytesMovingAverageReference); + } + + boolean isDownloadBytesPerSecMovingAverageReady() { + return downloadBytesPerSecMovingAverageReference.get().isReady(); + } + + double getDownloadBytesPerSecMovingAverage() { + return downloadBytesPerSecMovingAverageReference.get().getAverage(); + } + + void updateDownloadBytesPerSecMovingAverage(long speed) { + updateMovingAverage(speed, downloadBytesPerSecMutex, downloadBytesPerSecMovingAverageReference); + } + + boolean isDownloadTimeMovingAverageReady() { + return downloadTimeMsMovingAverageReference.get().isReady(); + } + + double getDownloadTimeMovingAverage() { + return 
downloadTimeMsMovingAverageReference.get().getAverage(); + } + + void updateDownloadTimeMovingAverage(long duration) { + updateMovingAverage(duration, downloadTimeMsMutex, downloadTimeMsMovingAverageReference); + } + + /** + * Record stats related to a download from Remote Translog Store + * @param prevDownloadBytesSucceeded Number of downloadBytesSucceeded in this tracker before the download was started + * @param prevDownloadTimeInMillis Amount of downloadTimeInMillis in this tracker before the download was started + */ + public void recordDownloadStats(long prevDownloadBytesSucceeded, long prevDownloadTimeInMillis) { + setLastSuccessfulDownloadTimestamp(System.currentTimeMillis()); + incrementDownloadsSucceeded(); + long bytesDownloaded = getDownloadBytesSucceeded() - prevDownloadBytesSucceeded; + updateDownloadBytesMovingAverage(bytesDownloaded); + long durationInMillis = getTotalDownloadTimeInMillis() - prevDownloadTimeInMillis; + updateDownloadTimeMovingAverage(durationInMillis); + if (durationInMillis > 0) { + updateDownloadBytesPerSecMovingAverage(bytesDownloaded * 1_000L / durationInMillis); + } + } + + /** + * Gets the tracker's state as seen in the stats API + * @return Stats object with the tracker's stats + */ + public RemoteTranslogTransferTracker.Stats stats() { + return new RemoteTranslogTransferTracker.Stats( + shardId, + lastSuccessfulUploadTimestamp.get(), + totalUploadsStarted.get(), + totalUploadsSucceeded.get(), + totalUploadsFailed.get(), + uploadBytesStarted.get(), + uploadBytesSucceeded.get(), + uploadBytesFailed.get(), + totalUploadTimeInMillis.get(), + uploadBytesMovingAverageReference.get().getAverage(), + uploadBytesPerSecMovingAverageReference.get().getAverage(), + uploadTimeMsMovingAverageReference.get().getAverage(), + lastSuccessfulDownloadTimestamp.get(), + totalDownloadsSucceeded.get(), + downloadBytesSucceeded.get(), + totalDownloadTimeInMillis.get(), + downloadBytesMovingAverageReference.get().getAverage(), + downloadBytesPerSecMovingAverageReference.get().getAverage(), + downloadTimeMsMovingAverageReference.get().getAverage() + ); + } + + @Override + public String toString() { + return "RemoteTranslogTransferStats{" + + "lastSuccessfulUploadTimestamp=" + + lastSuccessfulUploadTimestamp.get() + + "," + + "totalUploadsStarted=" + + totalUploadsStarted.get() + + "," + + "totalUploadsSucceeded=" + + totalUploadsSucceeded.get() + + "," + + "totalUploadsFailed=" + + totalUploadsFailed.get() + + "," + + "uploadBytesStarted=" + + uploadBytesStarted.get() + + "," + + "uploadBytesFailed=" + + uploadBytesFailed.get() + + "," + + "totalUploadTimeInMillis=" + + totalUploadTimeInMillis.get() + + "," + + "uploadBytesMovingAverage=" + + uploadBytesMovingAverageReference.get().getAverage() + + "," + + "uploadBytesPerSecMovingAverage=" + + uploadBytesPerSecMovingAverageReference.get().getAverage() + + "," + + "uploadTimeMovingAverage=" + + uploadTimeMsMovingAverageReference.get().getAverage() + + "," + + "lastSuccessfulDownloadTimestamp=" + + lastSuccessfulDownloadTimestamp.get() + + "," + + "totalDownloadsSucceeded=" + + totalDownloadsSucceeded.get() + + "," + + "downloadBytesSucceeded=" + + downloadBytesSucceeded.get() + + "," + + "totalDownloadTimeInMillis=" + + totalDownloadTimeInMillis.get() + + "," + + "downloadBytesMovingAverage=" + + downloadBytesMovingAverageReference.get().getAverage() + + "," + + "downloadBytesPerSecMovingAverage=" + + downloadBytesPerSecMovingAverageReference.get().getAverage() + + "," + + "downloadTimeMovingAverage=" + + 
downloadTimeMsMovingAverageReference.get().getAverage() + + "," + + "}"; + } + + /** + * Represents the tracker's state as seen in the stats API. + * + * @opensearch.internal + */ + public static class Stats implements Writeable { + + final ShardId shardId; + + /** + * Epoch timestamp of the last successful Remote Translog Store upload. + */ + public final long lastSuccessfulUploadTimestamp; + + /** + * Total number of Remote Translog Store uploads that have been started. + */ + public final long totalUploadsStarted; + + /** + * Total number of Remote Translog Store uploads that have failed. + */ + public final long totalUploadsFailed; + + /** + * Total number of Remote Translog Store uploads that have been successful. + */ + public final long totalUploadsSucceeded; + + /** + * Total number of byte uploads to Remote Translog Store that have been started. + */ + public final long uploadBytesStarted; + + /** + * Total number of byte uploads to Remote Translog Store that have failed. + */ + public final long uploadBytesFailed; + + /** + * Total number of byte uploads to Remote Translog Store that have been successful. + */ + public final long uploadBytesSucceeded; + + /** + * Total time spent on Remote Translog Store uploads. + */ + public final long totalUploadTimeInMillis; + + /** + * Moving average size of a Remote Translog Store upload in bytes. + */ + public final double uploadBytesMovingAverage; + + /** + * Moving average speed of a Remote Translog Store upload in bytes-per-second. + */ + public final double uploadBytesPerSecMovingAverage; + + /** + * Moving average time taken by a Remote Translog Store upload. + */ + public final double uploadTimeMovingAverage; + + /** + * Epoch timestamp of the last successful Remote Translog Store download. + */ + public final long lastSuccessfulDownloadTimestamp; + + /** + * Total number of Remote Translog Store downloads that have been successful. + */ + public final long totalDownloadsSucceeded; + + /** + * Total number of byte downloads from Remote Translog Store that have been successful. + */ + public final long downloadBytesSucceeded; + + /** + * Total time spent on Remote Translog Store downloads. + */ + public final long totalDownloadTimeInMillis; + + /** + * Moving average size of a Remote Translog Store download in bytes. + */ + public final double downloadBytesMovingAverage; + + /** + * Moving average speed of a Remote Translog Store download in bytes-per-second. + */ + public final double downloadBytesPerSecMovingAverage; + + /** + * Moving average time taken by a Remote Translog Store download.
+ */ + public final double downloadTimeMovingAverage; + + public Stats( + ShardId shardId, + long lastSuccessfulUploadTimestamp, + long totalUploadsStarted, + long totalUploadsSucceeded, + long totalUploadsFailed, + long uploadBytesStarted, + long uploadBytesSucceeded, + long uploadBytesFailed, + long totalUploadTimeInMillis, + double uploadBytesMovingAverage, + double uploadBytesPerSecMovingAverage, + double uploadTimeMovingAverage, + long lastSuccessfulDownloadTimestamp, + long totalDownloadsSucceeded, + long downloadBytesSucceeded, + long totalDownloadTimeInMillis, + double downloadBytesMovingAverage, + double downloadBytesPerSecMovingAverage, + double downloadTimeMovingAverage + ) { + this.shardId = shardId; + + this.lastSuccessfulUploadTimestamp = lastSuccessfulUploadTimestamp; + this.totalUploadsStarted = totalUploadsStarted; + this.totalUploadsFailed = totalUploadsFailed; + this.totalUploadsSucceeded = totalUploadsSucceeded; + this.uploadBytesStarted = uploadBytesStarted; + this.uploadBytesFailed = uploadBytesFailed; + this.uploadBytesSucceeded = uploadBytesSucceeded; + this.totalUploadTimeInMillis = totalUploadTimeInMillis; + this.uploadBytesMovingAverage = uploadBytesMovingAverage; + this.uploadBytesPerSecMovingAverage = uploadBytesPerSecMovingAverage; + this.uploadTimeMovingAverage = uploadTimeMovingAverage; + + this.lastSuccessfulDownloadTimestamp = lastSuccessfulDownloadTimestamp; + this.totalDownloadsSucceeded = totalDownloadsSucceeded; + this.downloadBytesSucceeded = downloadBytesSucceeded; + this.totalDownloadTimeInMillis = totalDownloadTimeInMillis; + this.downloadBytesMovingAverage = downloadBytesMovingAverage; + this.downloadBytesPerSecMovingAverage = downloadBytesPerSecMovingAverage; + this.downloadTimeMovingAverage = downloadTimeMovingAverage; + } + + public Stats(StreamInput in) throws IOException { + this.shardId = new ShardId(in); + + this.lastSuccessfulUploadTimestamp = in.readVLong(); + this.totalUploadsStarted = in.readVLong(); + this.totalUploadsFailed = in.readVLong(); + this.totalUploadsSucceeded = in.readVLong(); + this.uploadBytesStarted = in.readVLong(); + this.uploadBytesFailed = in.readVLong(); + this.uploadBytesSucceeded = in.readVLong(); + this.totalUploadTimeInMillis = in.readVLong(); + this.uploadBytesMovingAverage = in.readDouble(); + this.uploadBytesPerSecMovingAverage = in.readDouble(); + this.uploadTimeMovingAverage = in.readDouble(); + + this.lastSuccessfulDownloadTimestamp = in.readVLong(); + this.totalDownloadsSucceeded = in.readVLong(); + this.downloadBytesSucceeded = in.readVLong(); + this.totalDownloadTimeInMillis = in.readVLong(); + this.downloadBytesMovingAverage = in.readDouble(); + this.downloadBytesPerSecMovingAverage = in.readDouble(); + this.downloadTimeMovingAverage = in.readDouble(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + shardId.writeTo(out); + + out.writeVLong(lastSuccessfulUploadTimestamp); + out.writeVLong(totalUploadsStarted); + out.writeVLong(totalUploadsFailed); + out.writeVLong(totalUploadsSucceeded); + out.writeVLong(uploadBytesStarted); + out.writeVLong(uploadBytesFailed); + out.writeVLong(uploadBytesSucceeded); + out.writeVLong(totalUploadTimeInMillis); + out.writeDouble(uploadBytesMovingAverage); + out.writeDouble(uploadBytesPerSecMovingAverage); + out.writeDouble(uploadTimeMovingAverage); + + out.writeVLong(lastSuccessfulDownloadTimestamp); + out.writeVLong(totalDownloadsSucceeded); + out.writeVLong(downloadBytesSucceeded); + out.writeVLong(totalDownloadTimeInMillis); + 
out.writeDouble(downloadBytesMovingAverage); + out.writeDouble(downloadBytesPerSecMovingAverage); + out.writeDouble(downloadTimeMovingAverage); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + RemoteTranslogTransferTracker.Stats other = (RemoteTranslogTransferTracker.Stats) obj; + + return this.shardId.equals(other.shardId) + && this.lastSuccessfulUploadTimestamp == other.lastSuccessfulUploadTimestamp + && this.totalUploadsStarted == other.totalUploadsStarted + && this.totalUploadsFailed == other.totalUploadsFailed + && this.totalUploadsSucceeded == other.totalUploadsSucceeded + && this.uploadBytesStarted == other.uploadBytesStarted + && this.uploadBytesFailed == other.uploadBytesFailed + && this.uploadBytesSucceeded == other.uploadBytesSucceeded + && this.totalUploadTimeInMillis == other.totalUploadTimeInMillis + && Double.compare(this.uploadBytesMovingAverage, other.uploadBytesMovingAverage) == 0 + && Double.compare(this.uploadBytesPerSecMovingAverage, other.uploadBytesPerSecMovingAverage) == 0 + && Double.compare(this.uploadTimeMovingAverage, other.uploadTimeMovingAverage) == 0 + && this.lastSuccessfulDownloadTimestamp == other.lastSuccessfulDownloadTimestamp + && this.totalDownloadsSucceeded == other.totalDownloadsSucceeded + && this.downloadBytesSucceeded == other.downloadBytesSucceeded + && this.totalDownloadTimeInMillis == other.totalDownloadTimeInMillis + && Double.compare(this.downloadBytesMovingAverage, other.downloadBytesMovingAverage) == 0 + && Double.compare(this.downloadBytesPerSecMovingAverage, other.downloadBytesPerSecMovingAverage) == 0 + && Double.compare(this.downloadTimeMovingAverage, other.downloadTimeMovingAverage) == 0; + } + + @Override + public int hashCode() { + return Objects.hash( + shardId.toString(), + lastSuccessfulUploadTimestamp, + totalUploadsStarted, + totalUploadsFailed, + totalUploadsSucceeded, + uploadBytesStarted, + uploadBytesFailed, + uploadBytesSucceeded, + totalUploadTimeInMillis, + uploadBytesMovingAverage, + uploadBytesPerSecMovingAverage, + uploadTimeMovingAverage, + lastSuccessfulDownloadTimestamp, + totalDownloadsSucceeded, + downloadBytesSucceeded, + totalDownloadTimeInMillis, + downloadBytesMovingAverage, + downloadBytesPerSecMovingAverage, + downloadTimeMovingAverage + ); + } + } + + /** + * Checks whether the stats in this tracker and the stats contained in the given stats object are the same + * @param other Stats object to compare this tracker against + * @return true if the stats are the same, false otherwise + */ + boolean hasSameStatsAs(RemoteTranslogTransferTracker.Stats other) { + return this.stats().equals(other); + } +} diff --git a/server/src/main/java/org/opensearch/index/search/MatchQuery.java b/server/src/main/java/org/opensearch/index/search/MatchQuery.java index ade4fb1e69586..9e2b79971369d 100644 --- a/server/src/main/java/org/opensearch/index/search/MatchQuery.java +++ b/server/src/main/java/org/opensearch/index/search/MatchQuery.java @@ -753,9 +753,9 @@ private Query analyzeGraphBoolean(String field, TokenStream source, BooleanClaus lastState = end; final Query queryPos; boolean usePrefix = isPrefix && end == -1; - /** - * check if the GraphTokenStreamFiniteStrings graph is empty - * return empty BooleanQuery result + /* + check if the GraphTokenStreamFiniteStrings graph is empty + return empty BooleanQuery result */ Iterator<TokenStream> graphIt = graph.getFiniteStrings(); if (!graphIt.hasNext()) {
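Stepping back to the RemoteTranslogTransferTracker hunk above: recordDownloadStats derives each sample from counter deltas and records a bytes-per-second figure only when the measured duration is non-zero. The following is a minimal, self-contained sketch of that arithmetic; the class name and the record* helpers are illustrative stand-ins, not tracker internals.

    import java.util.concurrent.atomic.AtomicLong;

    // Illustrative sketch: derive download samples from counter deltas and
    // skip the speed sample when the measured duration is zero.
    class DownloadSampleDemo {
        private final AtomicLong downloadBytesSucceeded = new AtomicLong();
        private final AtomicLong totalDownloadTimeInMillis = new AtomicLong();

        void onDownloadCompleted(long prevBytes, long prevTimeMillis) {
            long bytesDownloaded = downloadBytesSucceeded.get() - prevBytes;
            long durationMillis = totalDownloadTimeInMillis.get() - prevTimeMillis;
            recordBytesSample(bytesDownloaded);
            recordTimeSample(durationMillis);
            if (durationMillis > 0) {
                // bytes * 1000 / millis == bytes per second; the guard keeps a
                // sub-millisecond download from dividing by zero.
                recordSpeedSample(bytesDownloaded * 1_000L / durationMillis);
            }
        }

        private void recordBytesSample(long v) { /* feed a moving average */ }
        private void recordTimeSample(long v) { /* feed a moving average */ }
        private void recordSpeedSample(long v) { /* feed a moving average */ }
    }

diff --git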
a/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java b/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java index 78b4a5f04c955..d46b34fe97356 100644 --- a/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java +++ b/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java @@ -86,14 +86,6 @@ public void copy(int slot, int doc) throws IOException { super.copy(slot, doc); } - @Override - protected boolean isMissingValueCompetitive() { - int result = missingValue.compareTo(bottom); - // in reverse (desc) sort missingValue is competitive when it's greater or equal to bottom, - // in asc sort missingValue is competitive when it's smaller or equal to bottom - return reverse ? (result >= 0) : (result <= 0); - } - @Override protected void encodeBottom(byte[] packedValue) { BigIntegerPoint.encodeDimension(bottom, packedValue, 0); @@ -103,5 +95,15 @@ protected void encodeBottom(byte[] packedValue) { protected void encodeTop(byte[] packedValue) { BigIntegerPoint.encodeDimension(topValue, packedValue, 0); } + + @Override + protected int compareMissingValueWithBottomValue() { + return missingValue.compareTo(bottom); + } + + @Override + protected int compareMissingValueWithTopValue() { + return missingValue.compareTo(topValue); + } } } diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index e41deb514fbef..1f9144b28f286 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -33,8 +33,11 @@ package org.opensearch.index.search.stats; import org.opensearch.Version; +import org.opensearch.action.search.SearchPhaseName; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.common.Nullable; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -57,16 +60,90 @@ public class SearchStats implements Writeable, ToXContentFragment { /** - * Statistics for search + * Holds statistic values for a particular phase. * * @opensearch.internal */ + public static class PhaseStatsLongHolder implements Writeable { + + long current; + long total; + long timeInMillis; + + public long getCurrent() { + return current; + } + + public long getTotal() { + return total; + } + + public long getTimeInMillis() { + return timeInMillis; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(current); + out.writeVLong(total); + out.writeVLong(timeInMillis); + } + + PhaseStatsLongHolder() { + this(0, 0, 0); + } + + PhaseStatsLongHolder(long current, long total, long timeInMillis) { + this.current = current; + this.total = total; + this.timeInMillis = timeInMillis; + } + + PhaseStatsLongHolder(StreamInput in) throws IOException { + this.current = in.readVLong(); + this.total = in.readVLong(); + this.timeInMillis = in.readVLong(); + } + + } + + /** + * Holds request stats for different phases.
+ * + * @opensearch.internal + */ + public static class RequestStatsLongHolder { + + Map<String, PhaseStatsLongHolder> requestStatsHolder = new HashMap<>(); + + public Map<String, PhaseStatsLongHolder> getRequestStatsHolder() { + return requestStatsHolder; + } + + RequestStatsLongHolder() { + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + requestStatsHolder.put(searchPhaseName.getName(), new PhaseStatsLongHolder()); + } + } + } + + /** + * Holder of statistics values + * + * @opensearch.internal + */ + public static class Stats implements Writeable, ToXContentFragment { private long queryCount; private long queryTimeInMillis; private long queryCurrent; + private long concurrentQueryCount; + private long concurrentQueryTimeInMillis; + private long concurrentQueryCurrent; + private long queryConcurrency; + private long fetchCount; private long fetchTimeInMillis; private long fetchCurrent; @@ -83,6 +160,13 @@ public static class Stats implements Writeable, ToXContentFragment { private long pitTimeInMillis; private long pitCurrent; + @Nullable + private RequestStatsLongHolder requestStatsLongHolder; + + public RequestStatsLongHolder getRequestStatsLongHolder() { + return requestStatsLongHolder; + } + private Stats() { // for internal use, initializes all counts to 0 } @@ -91,6 +175,10 @@ public Stats( long queryCount, long queryTimeInMillis, long queryCurrent, + long concurrentQueryCount, + long concurrentQueryTimeInMillis, + long concurrentQueryCurrent, + long queryConcurrency, long fetchCount, long fetchTimeInMillis, long fetchCurrent, @@ -104,10 +192,16 @@ public Stats( long suggestTimeInMillis, long suggestCurrent ) { + this.requestStatsLongHolder = new RequestStatsLongHolder(); this.queryCount = queryCount; this.queryTimeInMillis = queryTimeInMillis; this.queryCurrent = queryCurrent; + this.concurrentQueryCount = concurrentQueryCount; + this.concurrentQueryTimeInMillis = concurrentQueryTimeInMillis; + this.concurrentQueryCurrent = concurrentQueryCurrent; + this.queryConcurrency = queryConcurrency; + this.fetchCount = fetchCount; this.fetchTimeInMillis = fetchTimeInMillis; this.fetchCurrent = fetchCurrent; @@ -147,6 +241,17 @@ private Stats(StreamInput in) throws IOException { pitTimeInMillis = in.readVLong(); pitCurrent = in.readVLong(); } + + if (in.getVersion().onOrAfter(Version.V_2_11_0)) { + this.requestStatsLongHolder = new RequestStatsLongHolder(); + requestStatsLongHolder.requestStatsHolder = in.readMap(StreamInput::readString, PhaseStatsLongHolder::new); + } + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { + concurrentQueryCount = in.readVLong(); + concurrentQueryTimeInMillis = in.readVLong(); + concurrentQueryCurrent = in.readVLong(); + queryConcurrency = in.readVLong(); + } } public void add(Stats stats) { @@ -154,6 +259,11 @@ public void add(Stats stats) { queryTimeInMillis += stats.queryTimeInMillis; queryCurrent += stats.queryCurrent; + concurrentQueryCount += stats.concurrentQueryCount; + concurrentQueryTimeInMillis += stats.concurrentQueryTimeInMillis; + concurrentQueryCurrent += stats.concurrentQueryCurrent; + queryConcurrency += stats.queryConcurrency; + fetchCount += stats.fetchCount; fetchTimeInMillis += stats.fetchTimeInMillis; fetchCurrent += stats.fetchCurrent; @@ -175,6 +285,9 @@ public void addForClosingShard(Stats stats) { queryCount += stats.queryCount; queryTimeInMillis += stats.queryTimeInMillis; + concurrentQueryCount += stats.concurrentQueryCount; + concurrentQueryTimeInMillis += stats.concurrentQueryTimeInMillis; + fetchCount += stats.fetchCount; fetchTimeInMillis += stats.fetchTimeInMillis;
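The two version gates in the stream constructor above must mirror, check for check and field for field, the gates in writeTo further below; otherwise a mixed-version cluster would misalign the stream. Here is a minimal, self-contained sketch of that contract, with plain java.io streams and an int standing in for StreamInput/StreamOutput and Version; all names are illustrative.

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    // Sketch of version-gated wire compatibility: a field introduced in a newer
    // version is written and read behind the same check, in the same order.
    class GatedStatsSketch {
        static final int V_CONCURRENT_SEARCH = 2; // stand-in for Version.V_2_10_0

        long queryCount;
        long concurrentQueryCount; // added in V_CONCURRENT_SEARCH

        void writeTo(DataOutput out, int streamVersion) throws IOException {
            out.writeLong(queryCount);
            if (streamVersion >= V_CONCURRENT_SEARCH) {
                out.writeLong(concurrentQueryCount);
            }
        }

        static GatedStatsSketch readFrom(DataInput in, int streamVersion) throws IOException {
            GatedStatsSketch stats = new GatedStatsSketch();
            stats.queryCount = in.readLong();
            if (streamVersion >= V_CONCURRENT_SEARCH) {
                stats.concurrentQueryCount = in.readLong(); // mirrors writeTo exactly
            }
            return stats;
        }
    }

An older peer simply never sees the gated bytes, which is why the new counters can otherwise be treated as plain longs.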
@@ -189,6 +302,7 @@ public void addForClosingShard(Stats stats) { pitCount += stats.pitCount; pitTimeInMillis += stats.pitTimeInMillis; pitCurrent += stats.pitCurrent; + queryConcurrency += stats.queryConcurrency; } public long getQueryCount() { @@ -207,6 +321,30 @@ public long getQueryCurrent() { return queryCurrent; } + public long getConcurrentQueryCount() { + return concurrentQueryCount; + } + + public TimeValue getConcurrentQueryTime() { + return new TimeValue(concurrentQueryTimeInMillis); + } + + public double getConcurrentAvgSliceCount() { + if (concurrentQueryCount == 0) { + return 0; + } else { + return queryConcurrency / (double) concurrentQueryCount; + } + } + + public long getConcurrentQueryTimeInMillis() { + return concurrentQueryTimeInMillis; + } + + public long getConcurrentQueryCurrent() { + return concurrentQueryCurrent; + } + public long getFetchCount() { return fetchCount; } @@ -298,6 +436,24 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(pitTimeInMillis); out.writeVLong(pitCurrent); } + + if (out.getVersion().onOrAfter(Version.V_2_11_0)) { + if (requestStatsLongHolder == null) { + requestStatsLongHolder = new RequestStatsLongHolder(); + } + out.writeMap( + requestStatsLongHolder.getRequestStatsHolder(), + StreamOutput::writeString, + (stream, stats) -> stats.writeTo(stream) + ); + } + + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { + out.writeVLong(concurrentQueryCount); + out.writeVLong(concurrentQueryTimeInMillis); + out.writeVLong(concurrentQueryCurrent); + out.writeVLong(queryConcurrency); + } } @Override @@ -306,6 +462,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.QUERY_TIME_IN_MILLIS, Fields.QUERY_TIME, getQueryTime()); builder.field(Fields.QUERY_CURRENT, queryCurrent); + if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { + builder.field(Fields.CONCURRENT_QUERY_TOTAL, concurrentQueryCount); + builder.humanReadableField(Fields.CONCURRENT_QUERY_TIME_IN_MILLIS, Fields.CONCURRENT_QUERY_TIME, getConcurrentQueryTime()); + builder.field(Fields.CONCURRENT_QUERY_CURRENT, concurrentQueryCurrent); + builder.field(Fields.CONCURRENT_AVG_SLICE_COUNT, getConcurrentAvgSliceCount()); + } + builder.field(Fields.FETCH_TOTAL, fetchCount); builder.humanReadableField(Fields.FETCH_TIME_IN_MILLIS, Fields.FETCH_TIME, getFetchTime()); builder.field(Fields.FETCH_CURRENT, fetchCurrent); @@ -322,6 +485,22 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.SUGGEST_TIME_IN_MILLIS, Fields.SUGGEST_TIME, getSuggestTime()); builder.field(Fields.SUGGEST_CURRENT, suggestCurrent); + if (requestStatsLongHolder != null) { + builder.startObject(Fields.REQUEST); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + PhaseStatsLongHolder statsLongHolder = requestStatsLongHolder.requestStatsHolder.get(searchPhaseName.getName()); + if (statsLongHolder == null) { + continue; + } + builder.startObject(searchPhaseName.getName()); + builder.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, new TimeValue(statsLongHolder.timeInMillis)); + builder.field(Fields.CURRENT, statsLongHolder.current); + builder.field(Fields.TOTAL, statsLongHolder.total); + builder.endObject(); + } + builder.endObject(); + } return builder; } } @@ -336,6 +515,24 @@ public SearchStats() { totalStats = new Stats(); } + // Set the different Request Stats fields in here + public void setSearchRequestStats(SearchRequestStats 
searchRequestStats) { + if (totalStats.requestStatsLongHolder == null) { + totalStats.requestStatsLongHolder = new RequestStatsLongHolder(); + } + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + totalStats.requestStatsLongHolder.requestStatsHolder.put( + searchPhaseName.getName(), + new PhaseStatsLongHolder( + searchRequestStats.getPhaseCurrent(searchPhaseName), + searchRequestStats.getPhaseTotal(searchPhaseName), + searchRequestStats.getPhaseMetric(searchPhaseName) + ) + ); + } + } + public SearchStats(Stats totalStats, long openContexts, @Nullable Map<String, Stats> groupStats) { this.totalStats = totalStats; this.openContexts = openContexts; @@ -430,6 +627,11 @@ static final class Fields { static final String QUERY_TIME = "query_time"; static final String QUERY_TIME_IN_MILLIS = "query_time_in_millis"; static final String QUERY_CURRENT = "query_current"; + static final String CONCURRENT_QUERY_TOTAL = "concurrent_query_total"; + static final String CONCURRENT_QUERY_TIME = "concurrent_query_time"; + static final String CONCURRENT_QUERY_TIME_IN_MILLIS = "concurrent_query_time_in_millis"; + static final String CONCURRENT_QUERY_CURRENT = "concurrent_query_current"; + static final String CONCURRENT_AVG_SLICE_COUNT = "concurrent_avg_slice_count"; static final String FETCH_TOTAL = "fetch_total"; static final String FETCH_TIME = "fetch_time"; static final String FETCH_TIME_IN_MILLIS = "fetch_time_in_millis"; @@ -446,6 +648,12 @@ static final class Fields { static final String SUGGEST_TIME = "suggest_time"; static final String SUGGEST_TIME_IN_MILLIS = "suggest_time_in_millis"; static final String SUGGEST_CURRENT = "suggest_current"; + static final String REQUEST = "request"; + static final String TIME_IN_MILLIS = "time_in_millis"; + static final String TIME = "time"; + static final String CURRENT = "current"; + static final String TOTAL = "total"; + } @Override diff --git a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java index 6f6ebd5545c7a..99e3f8465c5db 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java @@ -91,6 +91,9 @@ public void onPreQueryPhase(SearchContext searchContext) { statsHolder.suggestCurrent.inc(); } else { statsHolder.queryCurrent.inc(); + if (searchContext.shouldUseConcurrentSearch()) { + statsHolder.concurrentQueryCurrent.inc(); + } } }); } @@ -104,6 +107,10 @@ public void onFailedQueryPhase(SearchContext searchContext) { } else { statsHolder.queryCurrent.dec(); assert statsHolder.queryCurrent.count() >= 0; + if (searchContext.shouldUseConcurrentSearch()) { + statsHolder.concurrentQueryCurrent.dec(); + assert statsHolder.concurrentQueryCurrent.count() >= 0; + } } }); } @@ -119,6 +126,13 @@ public void onQueryPhase(SearchContext searchContext, long tookInNanos) { statsHolder.queryMetric.inc(tookInNanos); statsHolder.queryCurrent.dec(); assert statsHolder.queryCurrent.count() >= 0; + if (searchContext.shouldUseConcurrentSearch()) { + statsHolder.concurrentQueryMetric.inc(tookInNanos); + statsHolder.concurrentQueryCurrent.dec(); + assert statsHolder.concurrentQueryCurrent.count() >= 0; + assert searchContext.searcher().getSlices() != null; + statsHolder.queryConcurrencyMetric.inc(searchContext.searcher().getSlices().length); + } } }); } @@ -206,6 +220,8 @@ public void onFreePitContext(ReaderContext readerContext) { */
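The inc/dec discipline in the listener hunks above is the standard in-flight gauge: increment when a phase starts, decrement on both the success and the failure path, and assert the gauge never goes negative. A stripped-down sketch of that pattern follows; the class is illustrative and not part of ShardSearchStats.

    import java.util.concurrent.atomic.AtomicLong;

    // Illustrative "current phase" gauge: it counts in-flight phases, so every
    // start must be matched by exactly one end, whether the phase succeeds or fails.
    class InFlightGauge {
        private final AtomicLong current = new AtomicLong();

        void onPhaseStart() { current.incrementAndGet(); }

        void onPhaseEnd() { // called from both the success and the failure path
            long value = current.decrementAndGet();
            assert value >= 0 : "in-flight count must never go negative";
        }

        long inFlight() { return current.get(); }
    }

static final class StatsHolder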
{ final MeanMetric queryMetric = new MeanMetric(); + final MeanMetric concurrentQueryMetric = new MeanMetric(); + final CounterMetric queryConcurrencyMetric = new CounterMetric(); final MeanMetric fetchMetric = new MeanMetric(); /* We store scroll statistics in microseconds because with nanoseconds we run the risk of overflowing the total stats if there are * many scrolls. For example, on a system with 2^24 scrolls that have been executed, each executing for 2^10 seconds, then using @@ -218,6 +234,7 @@ static final class StatsHolder { final MeanMetric pitMetric = new MeanMetric(); final MeanMetric suggestMetric = new MeanMetric(); final CounterMetric queryCurrent = new CounterMetric(); + final CounterMetric concurrentQueryCurrent = new CounterMetric(); final CounterMetric fetchCurrent = new CounterMetric(); final CounterMetric scrollCurrent = new CounterMetric(); final CounterMetric pitCurrent = new CounterMetric(); @@ -228,6 +245,10 @@ SearchStats.Stats stats() { queryMetric.count(), TimeUnit.NANOSECONDS.toMillis(queryMetric.sum()), queryCurrent.count(), + concurrentQueryMetric.count(), + TimeUnit.NANOSECONDS.toMillis(concurrentQueryMetric.sum()), + concurrentQueryCurrent.count(), + queryConcurrencyMetric.count(), fetchMetric.count(), TimeUnit.NANOSECONDS.toMillis(fetchMetric.sum()), fetchCurrent.count(), diff --git a/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java index 5069e1f7d6ccd..afa7fe83f9cff 100644 --- a/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java @@ -139,7 +139,7 @@ public synchronized void markSeqNoAsPersisted(final long seqNo) { /** * Updates the processed sequence checkpoint to the given value. - * + *
<p>
              * This method is only used for segment replication since indexing doesn't * take place on the replica allowing us to avoid the check that all sequence numbers * are consecutively processed. @@ -200,7 +200,7 @@ public long getMaxSeqNo() { /** * constructs a {@link SeqNoStats} object, using local state and the supplied global checkpoint - * + *
<p>
              * This is needed to make sure the persisted local checkpoint and max seq no are consistent */ public synchronized SeqNoStats getStats(final long globalCheckpoint) { diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index ffd0403301589..f14a95fdd53b5 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -57,8 +57,10 @@ import org.opensearch.index.shard.AbstractIndexShardComponent; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ReplicationGroup; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; -import org.opensearch.indices.replication.common.ReplicationTimer; +import org.opensearch.indices.replication.common.SegmentReplicationLagTimer; import java.io.IOException; import java.nio.file.Path; @@ -69,6 +71,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.OptionalLong; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; @@ -86,7 +89,7 @@ /** * This class is responsible for tracking the replication group with its progress and safety markers (local and global checkpoints). - * + *
<p>
* The global checkpoint is the highest sequence number for which all lower (or equal) sequence numbers have been processed * on all shards that are currently active. Since shards count as "active" when the cluster-manager starts * them, and before this primary shard has been notified of this fact, we also include shards that have completed recovery. These shards @@ -110,10 +113,10 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L * checkpoint based on the local checkpoints of all in-sync shard copies. * - replica: this shard receives global checkpoint information from the primary (see * {@link #updateGlobalCheckpointOnReplica(long, String)}). - * + *
<p>
              * When a shard is initialized (be it a primary or replica), it initially operates in replica mode. The global checkpoint tracker is * then switched to primary mode in the following three scenarios: - * + *
<p>
* - An initializing primary shard that is not a relocation target is moved to primary mode (using {@link #activatePrimaryMode}) once * the shard becomes active. * - An active replica shard is moved to primary mode (using {@link #activatePrimaryMode}) once it is promoted to primary. @@ -138,7 +141,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L * in-sync shard copies cannot grow, otherwise the relocation target might miss this information and increase the global checkpoint * too eagerly. As a consequence, some of the methods in this class are not allowed to be called while a handoff is in progress, * in particular {@link #markAllocationIdAsInSync}. - * + *
<p>
* A notable exception to this is the method {@link #updateFromClusterManager}, which is still allowed to be called during a relocation handoff. * The reason for this is that the handoff might fail and can be aborted (using {@link #abortRelocationHandoff}), in which case * it is important that the global checkpoint tracker does not miss any state updates that might have happened during the handoff attempt. @@ -714,7 +717,7 @@ public static class CheckpointState implements Writeable { * Map of ReplicationCheckpoints to ReplicationTimers. Timers are added as new checkpoints are published, and removed when * the replica is caught up. */ - Map<ReplicationCheckpoint, ReplicationTimer> checkpointTimers; + Map<ReplicationCheckpoint, SegmentReplicationLagTimer> checkpointTimers; /** * The time it took to complete the most recent replication event. @@ -1163,7 +1166,7 @@ public synchronized void updateGlobalCheckpointForShard(final String allocationI
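The checkpointTimers map above is filled when a checkpoint is published and pruned as replicas catch up (the updateVisibleCheckpointForShard hunk that follows shows the removeIf side). A dependency-free sketch of that lifecycle, with a long version number standing in for ReplicationCheckpoint and a stub timer standing in for SegmentReplicationLagTimer:

    import java.util.HashMap;
    import java.util.Map;

    // Sketch of per-checkpoint timer bookkeeping: computeIfAbsent on publish,
    // removeIf (stopping each timer) once the replica has caught up.
    class CheckpointTimersSketch {
        private final Map<Long, LagTimerStub> timers = new HashMap<>();

        static class LagTimerStub {
            long stoppedAtMillis;
            void stop() { stoppedAtMillis = System.currentTimeMillis(); }
        }

        void onCheckpointPublished(long version) {
            timers.computeIfAbsent(version, ignored -> new LagTimerStub());
        }

        void onReplicaVisibleCheckpoint(long visibleVersion) {
            timers.entrySet().removeIf(entry -> {
                boolean caughtUp = entry.getKey() <= visibleVersion; // i.e. no longer ahead of the visible one
                if (caughtUp) {
                    entry.getValue().stop();
                }
                return caughtUp;
            });
        }
    }

/** * Update the local knowledge of the visible checkpoint for the specified allocation ID. - * + *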
<p>
* This method will also stop timers for each shard and compute replication lag metrics. * * @param allocationId the allocation ID to update the global checkpoint for @@ -1188,9 +1191,9 @@ public synchronized void updateVisibleCheckpointForShard(final String allocation cps.checkpointTimers.entrySet().removeIf((entry) -> { boolean result = entry.getKey().isAheadOf(visibleCheckpoint) == false; if (result) { - final ReplicationTimer timer = entry.getValue(); + final SegmentReplicationLagTimer timer = entry.getValue(); timer.stop(); - lastFinished.set(Math.max(lastFinished.get(), timer.time())); + lastFinished.set(Math.max(lastFinished.get(), timer.totalElapsedTime())); } return result; }); @@ -1210,7 +1213,7 @@ public synchronized void updateVisibleCheckpointForShard(final String allocation } /** - * After a new checkpoint is published, start a timer for each replica to the checkpoint. + * After a new checkpoint is published, create a timer for each replica to the checkpoint. * @param checkpoint {@link ReplicationCheckpoint} */ public synchronized void setLatestReplicationCheckpoint(ReplicationCheckpoint checkpoint) { @@ -1219,7 +1222,7 @@ public synchronized void setLatestReplicationCheckpoint(ReplicationCheckpoint ch this.latestReplicationCheckpoint = checkpoint; } if (primaryMode) { - startReplicationLagTimers(); + createReplicationLagTimers(); } } @@ -1227,7 +1230,15 @@ public ReplicationCheckpoint getLatestReplicationCheckpoint() { return this.latestReplicationCheckpoint; } - private void startReplicationLagTimers() { + private boolean isPrimaryRelocation(String allocationId) { + Optional<ShardRouting> shardRouting = routingTable.shards() + .stream() + .filter(routing -> routing.allocationId().getId().equals(allocationId)) + .findAny(); + return shardRouting.isPresent() && shardRouting.get().primary(); + } + + private void createReplicationLagTimers() { for (Map.Entry<String, CheckpointState> entry : checkpoints.entrySet()) { final String allocationId = entry.getKey(); if (allocationId.equals(this.shardAllocationId) == false) { @@ -1236,12 +1247,9 @@ private void startReplicationLagTimers() { // it is possible for a shard to be in-sync but not yet removed from the checkpoints collection after a failover event. if (cps.inSync && replicationGroup.getUnavailableInSyncShards().contains(allocationId) == false + && isPrimaryRelocation(allocationId) == false && latestReplicationCheckpoint.isAheadOf(cps.visibleReplicationCheckpoint)) { - cps.checkpointTimers.computeIfAbsent(latestReplicationCheckpoint, ignored -> { - final ReplicationTimer replicationTimer = new ReplicationTimer(); - replicationTimer.start(); - return replicationTimer; - }); + cps.checkpointTimers.computeIfAbsent(latestReplicationCheckpoint, ignored -> new SegmentReplicationLagTimer()); logger.trace( () -> new ParameterizedMessage( "updated last published checkpoint for {} at visible cp {} to {} - timers [{}]", @@ -1256,6 +1264,30 @@ private void startReplicationLagTimers() { } }
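This change splits timer creation from arming: createReplicationLagTimers above registers an un-started timer per lagging replica, and startReplicationLagTimers below arms it once the checkpoint is actually published. Below is a minimal sketch of a timer with those two-phase semantics, assuming (from the timer.time() and timer.totalElapsedTime() calls in these hunks) that the former excludes and the latter includes the pre-start gap:

    // Two-phase lag timer sketch: created when a checkpoint is published, armed
    // later by start(). timeMillis() covers start()..stop();
    // totalElapsedTimeMillis() also includes the gap between creation and start().
    class LagTimerSketch {
        private final long createdNanos = System.nanoTime();
        private long startNanos = -1;
        private long stopNanos = -1;

        void start() { if (startNanos == -1) startNanos = System.nanoTime(); }

        void stop() { if (stopNanos == -1) stopNanos = System.nanoTime(); }

        long timeMillis() {
            if (startNanos == -1) return 0; // never armed
            long end = stopNanos == -1 ? System.nanoTime() : stopNanos;
            return (end - startNanos) / 1_000_000;
        }

        long totalElapsedTimeMillis() {
            long end = stopNanos == -1 ? System.nanoTime() : stopNanos;
            return (end - createdNanos) / 1_000_000;
        }
    }

+ /** + * After a new checkpoint is published, start a timer per replica for the checkpoint.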
+ * @param checkpoint {@link ReplicationCheckpoint} + */ + public synchronized void startReplicationLagTimers(ReplicationCheckpoint checkpoint) { + assert indexSettings.isSegRepEnabled(); + if (checkpoint.equals(latestReplicationCheckpoint) == false) { + this.latestReplicationCheckpoint = checkpoint; + } + if (primaryMode) { + checkpoints.entrySet().stream().filter(e -> !e.getKey().equals(this.shardAllocationId)).forEach(e -> { + String allocationId = e.getKey(); + final CheckpointState cps = e.getValue(); + if (cps.inSync + && replicationGroup.getUnavailableInSyncShards().contains(allocationId) == false + && isPrimaryRelocation(e.getKey()) == false + && latestReplicationCheckpoint.isAheadOf(cps.visibleReplicationCheckpoint) + && cps.checkpointTimers.containsKey(latestReplicationCheckpoint)) { + cps.checkpointTimers.get(latestReplicationCheckpoint).start(); + } + }); + } + } + /** * Fetch stats on segment replication. * @return {@link Tuple} V1 - TimeValue in ms - mean replication lag for this primary to its entire group, @@ -1272,27 +1304,27 @@ public synchronized Set<SegmentReplicationShardStats> getSegmentReplicationStats entry -> entry.getKey().equals(this.shardAllocationId) == false && entry.getValue().inSync && replicationGroup.getUnavailableInSyncShards().contains(entry.getKey()) == false + && isPrimaryRelocation(entry.getKey()) == false ) - .map(entry -> buildShardStats(latestReplicationCheckpoint.getLength(), entry.getKey(), entry.getValue())) + .map(entry -> buildShardStats(entry.getKey(), entry.getValue())) .collect(Collectors.toUnmodifiableSet()); } return Collections.emptySet(); } - private SegmentReplicationShardStats buildShardStats( - final long latestCheckpointLength, - final String allocationId, - final CheckpointState checkpointState - ) { - final Map<ReplicationCheckpoint, ReplicationTimer> checkpointTimers = checkpointState.checkpointTimers; + private SegmentReplicationShardStats buildShardStats(final String allocationId, final CheckpointState cps) { + final Store.RecoveryDiff diff = Store.segmentReplicationDiff( + latestReplicationCheckpoint.getMetadataMap(), + cps.visibleReplicationCheckpoint != null ? cps.visibleReplicationCheckpoint.getMetadataMap() : Collections.emptyMap() + ); + final long bytesBehind = diff.missing.stream().mapToLong(StoreFileMetadata::length).sum(); return new SegmentReplicationShardStats( allocationId, - checkpointTimers.size(), - checkpointState.visibleReplicationCheckpoint == null - ?
latestCheckpointLength - : Math.max(latestCheckpointLength - checkpointState.visibleReplicationCheckpoint.getLength(), 0), - checkpointTimers.values().stream().mapToLong(ReplicationTimer::time).max().orElse(0), - checkpointState.lastCompletedReplicationLag + cps.checkpointTimers.size(), + bytesBehind, + cps.checkpointTimers.values().stream().mapToLong(SegmentReplicationLagTimer::time).max().orElse(0), + cps.checkpointTimers.values().stream().mapToLong(SegmentReplicationLagTimer::totalElapsedTime).max().orElse(0), + cps.lastCompletedReplicationLag ); } diff --git a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java index f74fc7eefe65c..ca3c7e1d49700 100644 --- a/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/RetentionLeaseSyncAction.java @@ -62,6 +62,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportResponseHandler; @@ -99,7 +100,8 @@ public RetentionLeaseSyncAction( final ShardStateAction shardStateAction, final ActionFilters actionFilters, final IndexingPressureService indexingPressureService, - final SystemIndices systemIndices + final SystemIndices systemIndices, + final Tracer tracer ) { super( settings, @@ -115,7 +117,8 @@ public RetentionLeaseSyncAction( ignore -> ThreadPool.Names.MANAGEMENT, false, indexingPressureService, - systemIndices + systemIndices, + tracer ); } diff --git a/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java index 9bc105bf13f0a..3ee74e5267718 100644 --- a/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/CloseableRetryableRefreshListener.java @@ -104,6 +104,7 @@ protected TimeValue getNextRetryInterval() { private void scheduleRetry(TimeValue interval, String retryThreadPoolName, boolean didRefresh) { // If the underlying listener has closed, then we do not allow even the retry to be scheduled if (closed.get() || isRetryEnabled() == false) { + getLogger().debug("skip retry on closed={} isRetryEnabled={}", closed.get(), isRetryEnabled()); return; } @@ -112,6 +113,7 @@ private void scheduleRetry(TimeValue interval, String retryThreadPoolName, boole // If the retryScheduled is already true, then we return from here itself. If not, then we proceed with scheduling // the retry. if (retryScheduled.getAndSet(true)) { + getLogger().debug("skip retry on retryScheduled=true"); return; } @@ -188,7 +190,7 @@ public final void close() throws IOException { if (semaphore.tryAcquire(TOTAL_PERMITS, 10, TimeUnit.MINUTES)) { boolean result = closed.compareAndSet(false, true); assert result && semaphore.availablePermits() == 0; - getLogger().info("Closed"); + getLogger().info("All permits are acquired and refresh listener is closed"); } else { throw new TimeoutException("timeout while closing gated refresh listener"); } @@ -200,7 +202,6 @@ public final void close() throws IOException { protected abstract Logger getLogger(); // Visible for testing - /** * Returns if the retry is scheduled or not. 
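The close() hunk above (acquire every permit, then flip the closed flag) is a compact way to drain in-flight work before shutdown. Here is a self-contained sketch of the same gating idea; the permit count, timeout, and class name are illustrative:

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.concurrent.Semaphore;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicBoolean;

    // Sketch: tasks each hold one permit while running; close() acquires every
    // permit, so it returns only once nothing is in flight, then sets the flag
    // that prevents new work from starting.
    class DrainOnClose implements Closeable {
        private static final int TOTAL_PERMITS = 8; // illustrative
        private final Semaphore semaphore = new Semaphore(TOTAL_PERMITS);
        private final AtomicBoolean closed = new AtomicBoolean(false);

        boolean tryRun(Runnable task) {
            if (closed.get() || semaphore.tryAcquire() == false) {
                return false; // already closed, or close() is draining permits
            }
            try {
                task.run();
                return true;
            } finally {
                semaphore.release();
            }
        }

        @Override
        public void close() throws IOException {
            try {
                if (semaphore.tryAcquire(TOTAL_PERMITS, 10, TimeUnit.MINUTES)) {
                    closed.set(true); // all permits held: no task is running
                } else {
                    throw new IOException("timed out while draining in-flight work");
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new IOException(e);
            }
        }
    }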
* diff --git a/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java b/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java index d1cb396f55d0f..3017c3ce6dcff 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexEventListener.java @@ -155,7 +155,7 @@ default void beforeIndexShardDeleted(ShardId shardId, Settings indexSettings) {} /** * Called after the index shard has been deleted from disk. - * + *
<p>
              * Note: this method is only called if the deletion of the shard did finish without an exception * * @param shardId The shard id diff --git a/server/src/main/java/org/opensearch/index/shard/IndexSettingProvider.java b/server/src/main/java/org/opensearch/index/shard/IndexSettingProvider.java index 441a9a6413ffc..861a325c45d4b 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexSettingProvider.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexSettingProvider.java @@ -32,14 +32,16 @@ package org.opensearch.index.shard; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; /** * An {@link IndexSettingProvider} is a provider for index level settings that can be set * explicitly as a default value (so they show up as "set" for newly created indices) * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface IndexSettingProvider { /** * Returns explicitly set default index {@link Settings} for the given index. This should not diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 8d250892e40d2..2ea9d009859bf 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -97,7 +97,6 @@ import org.opensearch.common.util.concurrent.RunOnce; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.common.util.set.Sets; import org.opensearch.core.Assertions; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.unit.ByteSizeValue; @@ -111,6 +110,7 @@ import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.ReplicationStats; import org.opensearch.index.SegmentReplicationShardStats; import org.opensearch.index.VersionType; import org.opensearch.index.cache.IndexCache; @@ -149,7 +149,7 @@ import org.opensearch.index.recovery.RecoveryStats; import org.opensearch.index.refresh.RefreshStats; import org.opensearch.index.remote.RemoteSegmentStats; -import org.opensearch.index.remote.RemoteStorePressureService; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.search.stats.SearchStats; import org.opensearch.index.search.stats.ShardSearchStats; import org.opensearch.index.seqno.ReplicationTracker; @@ -162,6 +162,7 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.store.RemoteStoreFileDownloader; import org.opensearch.index.store.Store; import org.opensearch.index.store.Store.MetadataSnapshot; import org.opensearch.index.store.StoreFileMetadata; @@ -169,6 +170,7 @@ import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; import org.opensearch.index.translog.RemoteFsTranslog; +import org.opensearch.index.translog.RemoteTranslogStats; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogConfig; import org.opensearch.index.translog.TranslogFactory; @@ -181,10 +183,12 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import 
org.opensearch.indices.recovery.RecoveryFailedException; import org.opensearch.indices.recovery.RecoveryListener; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; +import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.search.suggest.completion.CompletionStats; @@ -195,10 +199,12 @@ import java.io.IOException; import java.io.PrintStream; import java.nio.channels.ClosedByInterruptException; +import java.nio.channels.FileChannel; import java.nio.charset.StandardCharsets; import java.nio.file.NoSuchFileException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.EnumSet; import java.util.HashSet; @@ -335,9 +341,10 @@ Runnable getGlobalCheckpointSyncer() { private final Store remoteStore; private final BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier; private final boolean isTimeSeriesIndex; - private final RemoteStorePressureService remoteStorePressureService; + private final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; + private final List<ReferenceManager.RefreshListener> internalRefreshListener = new ArrayList<>(); + private final RemoteStoreFileDownloader fileDownloader; public IndexShard( final ShardRouting shardRouting, @@ -363,7 +370,10 @@ public IndexShard( final BiFunction<IndexSettings, ShardRouting, TranslogFactory> translogFactorySupplier, @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher, @Nullable final Store remoteStore, - final RemoteStorePressureService remoteStorePressureService + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + final Supplier<TimeValue> clusterRemoteTranslogBufferIntervalSupplier, + final String nodeId, + final RecoverySettings recoverySettings ) throws IOException { super(shardRouting.shardId(), indexSettings); assert shardRouting.initializing(); @@ -384,7 +394,7 @@ public IndexShard( threadPool, this::getEngine, indexSettings.isRemoteTranslogStoreEnabled(), - indexSettings::getRemoteTranslogUploadBufferInterval + () -> getRemoteTranslogUploadBufferInterval(clusterRemoteTranslogBufferIntervalSupplier) ); this.mapperService = mapperService; this.indexCache = indexCache; @@ -409,7 +419,7 @@ public IndexShard( logger.debug("state: [CREATED]"); this.checkIndexOnStartup = indexSettings.getValue(IndexSettings.INDEX_CHECK_ON_STARTUP); - this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, bigArrays); + this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, bigArrays, nodeId); final String aId = shardRouting.allocationId().getId(); final long primaryTerm = indexSettings.getIndexMetadata().primaryTerm(shardId.id()); this.pendingPrimaryTerm = primaryTerm; @@ -458,7 +468,8 @@ public boolean shouldCache(Query query) { this.isTimeSeriesIndex = (mapperService == null || mapperService.documentMapper() == null) ?
false : mapperService.documentMapper().mappers().containsTimeStampField(); - this.remoteStorePressureService = remoteStorePressureService; + this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory; + this.fileDownloader = new RemoteStoreFileDownloader(shardRouting.shardId(), threadPool, recoverySettings); } public ThreadPool getThreadPool() { @@ -548,8 +559,16 @@ public QueryCachingPolicy getQueryCachingPolicy() { } /** Only used for testing **/ - protected RemoteStorePressureService getRemoteStorePressureService() { - return remoteStorePressureService; + protected RemoteStoreStatsTrackerFactory getRemoteStoreStatsTrackerFactory() { + return remoteStoreStatsTrackerFactory; + } + + public String getNodeId() { + return translogConfig.getNodeId(); + } + + public RemoteStoreFileDownloader getFileDownloader() { + return fileDownloader; } @Override @@ -625,7 +644,7 @@ public void updateShardState( if (currentRouting.initializing() && currentRouting.isRelocationTarget() == false && newRouting.active()) { // the cluster-manager started a recovering primary, activate primary mode. replicationTracker.activatePrimaryMode(getLocalCheckpoint()); - ensurePeerRecoveryRetentionLeasesExist(); + postActivatePrimaryMode(); } } else { assert currentRouting.primary() == false : "term is only increased as part of primary promotion"; @@ -683,7 +702,16 @@ public void updateShardState( if (indexSettings.isSegRepEnabled()) { // this Shard's engine was read only, we need to update its engine before restoring local history from xlog. assert newRouting.primary() && currentRouting.primary() == false; + ReplicationTimer timer = new ReplicationTimer(); + timer.start(); + logger.debug( + "Resetting engine on promotion of shard [{}] to primary, startTime {}\n", + shardId, + timer.startTime() + ); resetEngineToGlobalCheckpoint(); + timer.stop(); + logger.info("Completed engine failover for shard [{}] in: {} ms", shardId, timer.time()); // It is possible an engine can open with a SegmentInfos on a higher gen but the reader does not refresh to // trigger our refresh listener. // Force update the checkpoint post engine reset. @@ -696,8 +724,7 @@ public void updateShardState( // are brought up to date. checkpointPublisher.publish(this, getLatestReplicationCheckpoint()); } - - ensurePeerRecoveryRetentionLeasesExist(); + postActivatePrimaryMode(); /* * If this shard was serving as a replica shard when another shard was promoted to primary then * its Lucene index was reset during the primary term transition. 
In particular, the Lucene index @@ -855,7 +882,7 @@ public void relocated( synchronized (mutex) { verifyRelocatingState(); replicationTracker.completeRelocationHandoff(); // make changes to primaryMode and relocated flag only under - // mutex + // mutex } } catch (final Exception e) { try { @@ -1382,7 +1409,9 @@ public MergeStats mergeStats() { if (engine == null) { return new MergeStats(); } - return engine.getMergeStats(); + final MergeStats mergeStats = engine.getMergeStats(); + mergeStats.addUnreferencedFileCleanUpStats(engine.unreferencedFileCleanUpsPerformed()); + return mergeStats; } public SegmentsStats segmentStats(boolean includeSegmentFileSizes, boolean includeUnloadedSegments) { @@ -1391,9 +1420,12 @@ public SegmentsStats segmentStats(boolean includeSegmentFileSizes, boolean inclu // Populate remote_store stats only if the index is remote store backed if (indexSettings.isRemoteStoreEnabled()) { segmentsStats.addRemoteSegmentStats( - new RemoteSegmentStats(remoteStorePressureService.getRemoteRefreshSegmentTracker(shardId).stats()) + new RemoteSegmentStats(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId).stats()) ); } + if (indexSettings.isSegRepEnabled()) { + segmentsStats.addReplicationStats(getReplicationStats()); + } return segmentsStats; } @@ -1406,7 +1438,15 @@ public FieldDataStats fieldDataStats(String... fields) { } public TranslogStats translogStats() { - return getEngine().getTranslogStats(); + TranslogStats translogStats = getEngine().getTranslogStats(); + // Populate remote_store stats only if the index is remote store backed + if (indexSettings.isRemoteStoreEnabled()) { + translogStats.addRemoteTranslogStats( + new RemoteTranslogStats(remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardId).stats()) + ); + } + + return translogStats; } public CompletionStats completionStats(String... fields) { @@ -1439,6 +1479,9 @@ public void flush(FlushRequest request) { * {@link org.opensearch.index.translog.TranslogDeletionPolicy} for details */ public void trimTranslog() { + if (isRemoteTranslogEnabled()) { + return; + } verifyNotClosed(); final Engine engine = getEngine(); engine.trimUnreferencedTranslogFiles(); @@ -1447,7 +1490,7 @@ public void trimTranslog() { /** * Rolls the translog generation and cleans unneeded. */ - public void rollTranslogGeneration() { + public void rollTranslogGeneration() throws IOException { final Engine engine = getEngine(); engine.rollTranslogGeneration(); } @@ -1580,8 +1623,11 @@ public GatedCloseable<IndexCommit> acquireSafeIndexCommit() throws EngineExcepti } /** - * Compute and return the latest ReplicationCheckpoint for a particular shard. - * @return EMPTY checkpoint before the engine is opened and null for non-segrep enabled indices + * Return the most recently computed ReplicationCheckpoint for a particular shard. + * The checkpoint is updated inside a refresh listener and may lag behind the SegmentInfos on the reader. + * To guarantee the checkpoint is up to date with the latest on-reader infos, use `getLatestSegmentInfosAndCheckpoint` instead. + * + * @return {@link ReplicationCheckpoint} - The most recently computed ReplicationCheckpoint. */ public ReplicationCheckpoint getLatestReplicationCheckpoint() { return replicationTracker.getLatestReplicationCheckpoint(); @@ -1590,7 +1636,7 @@ public ReplicationCheckpoint getLatestReplicationCheckpoint() {
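The rewritten getter above returns a cached checkpoint, while computeReplicationCheckpoint (introduced further down) recomputes only when the SegmentInfos version or generation moves on. A minimal sketch of that memoization guard, where String stands in for ReplicationCheckpoint and every name is illustrative:

    import java.util.function.Supplier;

    // Illustrative memoization guard: the expensive store lookup is skipped
    // while the (version, generation) pair is unchanged.
    class CheckpointCache {
        private long lastVersion = -1;
        private long lastGeneration = -1;
        private String lastCheckpoint; // stand-in for ReplicationCheckpoint

        synchronized String computeIfChanged(long version, long generation, Supplier<String> compute) {
            if (version == lastVersion && generation == lastGeneration && lastCheckpoint != null) {
                return lastCheckpoint; // reuse; no IO needed
            }
            lastCheckpoint = compute.get(); // IO-heavy path (metadata snapshot)
            lastVersion = version;
            lastGeneration = generation;
            return lastCheckpoint;
        }
    }

/** * Compute and return the latest ReplicationCheckpoint for a shard and a GatedCloseable containing the corresponding SegmentInfos.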
* The segments referenced by the SegmentInfos will remain on disk until the GatedCloseable is closed. - * + *
<p>
* Primary shards compute the seqNo used in the replication checkpoint from the fetched SegmentInfos. * Replica shards compute the seqNo from their latest processed checkpoint, which only increases when refreshing on new segments. * @@ -1600,32 +1646,12 @@ public ReplicationCheckpoint getLatestReplicationCheckpoint() { public Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> getLatestSegmentInfosAndCheckpoint() { assert indexSettings.isSegRepEnabled(); - Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> nullSegmentInfosEmptyCheckpoint = new Tuple<>( - new GatedCloseable<>(null, () -> {}), - getLatestReplicationCheckpoint() - ); - - if (getEngineOrNull() == null) { - return nullSegmentInfosEmptyCheckpoint; - } // do not close the snapshot - caller will close it. GatedCloseable<SegmentInfos> snapshot = null; try { snapshot = getSegmentInfosSnapshot(); - if (snapshot.get() != null) { - SegmentInfos segmentInfos = snapshot.get(); - return new Tuple<>( - snapshot, - new ReplicationCheckpoint( - this.shardId, - getOperationPrimaryTerm(), - segmentInfos.getGeneration(), - segmentInfos.getVersion(), - store.getSegmentMetadataMap(segmentInfos).values().stream().mapToLong(StoreFileMetadata::length).sum(), - getEngine().config().getCodec().getName() - ) - ); - } + final SegmentInfos segmentInfos = snapshot.get(); + return new Tuple<>(snapshot, computeReplicationCheckpoint(segmentInfos)); } catch (IOException | AlreadyClosedException e) { logger.error("Error Fetching SegmentInfos and latest checkpoint", e); if (snapshot != null) { @@ -1636,7 +1662,39 @@ public Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> getLatestSegme } } } - return nullSegmentInfosEmptyCheckpoint; + return new Tuple<>(new GatedCloseable<>(null, () -> {}), getLatestReplicationCheckpoint()); + } + + /** + * Compute the latest {@link ReplicationCheckpoint} from a SegmentInfos. + * This function fetches a metadata snapshot from the store, which comes with an IO cost. + * We will reuse the existing stored checkpoint if it is at the same SI version. + * + * @param segmentInfos {@link SegmentInfos} infos to use to compute. + * @return {@link ReplicationCheckpoint} Checkpoint computed from the infos. + * @throws IOException When there is an error computing segment metadata from the store.
+ */ + ReplicationCheckpoint computeReplicationCheckpoint(SegmentInfos segmentInfos) throws IOException { + if (segmentInfos == null) { + return ReplicationCheckpoint.empty(shardId); + } + final ReplicationCheckpoint latestReplicationCheckpoint = getLatestReplicationCheckpoint(); + if (latestReplicationCheckpoint.getSegmentInfosVersion() == segmentInfos.getVersion() + && latestReplicationCheckpoint.getSegmentsGen() == segmentInfos.getGeneration()) { + return latestReplicationCheckpoint; + } + final Map metadataMap = store.getSegmentMetadataMap(segmentInfos); + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( + this.shardId, + getOperationPrimaryTerm(), + segmentInfos.getGeneration(), + segmentInfos.getVersion(), + metadataMap.values().stream().mapToLong(StoreFileMetadata::length).sum(), + getEngine().config().getCodec().getName(), + metadataMap + ); + logger.trace("Recomputed ReplicationCheckpoint for shard {}", checkpoint); + return checkpoint; } /** @@ -1863,6 +1921,10 @@ static Engine.Searcher wrapSearcher( } } + public void onCheckpointPublished(ReplicationCheckpoint checkpoint) { + replicationTracker.startReplicationLagTimers(checkpoint); + } + /** * Used with segment replication during relocation handoff, this method updates current read only engine to global * checkpoint followed by changing to writeable engine @@ -1952,6 +2014,29 @@ private RemoteSegmentStoreDirectory getRemoteDirectory() { return ((RemoteSegmentStoreDirectory) remoteDirectory); } + /** + Returns true iff it is able to verify that remote segment store + is in sync with local + */ + boolean isRemoteSegmentStoreInSync() { + assert indexSettings.isRemoteStoreEnabled(); + try { + RemoteSegmentStoreDirectory directory = getRemoteDirectory(); + if (directory.readLatestMetadataFile() != null) { + // verifying that all files except EXCLUDE_FILES are uploaded to the remote + Collection uploadFiles = directory.getSegmentsUploadedToRemoteStore().keySet(); + SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); + Collection localFiles = segmentInfos.files(true); + if (uploadFiles.containsAll(localFiles)) { + return true; + } + } + } catch (IOException e) { + logger.error("Exception while reading latest metadata", e); + } + return false; + } + public void preRecovery() { final IndexShardState currentState = this.state; // single volatile read if (currentState == IndexShardState.CLOSED) { @@ -2283,7 +2368,7 @@ public void openEngineAndRecoverFromTranslog() throws IOException { }; // Do not load the global checkpoint if this is a remote snapshot index - if (indexSettings.isRemoteSnapshot() == false) { + if (indexSettings.isRemoteSnapshot() == false && indexSettings.isRemoteTranslogStoreEnabled() == false) { loadGlobalCheckpointToReplicationTracker(); } @@ -2321,6 +2406,7 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t } private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier, boolean syncFromRemote) throws IOException { + syncFromRemote = syncFromRemote && indexSettings.isRemoteSnapshot() == false; assert Thread.holdsLock(mutex) == false : "opening engine under mutex"; if (state != IndexShardState.RECOVERING) { throw new IndexShardNotRecoveringException(shardId, state); @@ -2339,19 +2425,38 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier, b synchronized (engineMutex) { assert currentEngineReference.get() == null : "engine is running"; verifyNotClosed(); - if (indexSettings.isRemoteStoreEnabled() && 
syncFromRemote) { - syncSegmentsFromRemoteSegmentStore(false, true); - } - if (indexSettings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { + if (indexSettings.isRemoteStoreEnabled()) { + // Download missing segments from remote segment store. if (syncFromRemote) { - syncRemoteTranslogAndUpdateGlobalCheckpoint(); - } else { - // we will enter this block when we do not want to recover from remote translog. - // currently only during snapshot restore, we are coming into this block. - // here, as while initiliazing remote translog we cannot skip downloading translog files, - // so before that step, we are deleting the translog files present in remote store. - deleteTranslogFilesFromRemoteTranslog(); - + syncSegmentsFromRemoteSegmentStore(false); + } + if (shardRouting.primary()) { + if (syncFromRemote) { + syncRemoteTranslogAndUpdateGlobalCheckpoint(); + } else { + // we will enter this block when we do not want to recover from remote translog. + // currently only during snapshot restore, we are coming into this block. + // here, as while initializing remote translog we cannot skip downloading translog files, + // so before that step, we are deleting the translog files present in remote store. + deleteTranslogFilesFromRemoteTranslog(); + } + } else if (syncFromRemote) { + // For replicas, when we download segments from remote segment store, we need to make sure that local + // translog has the same UUID that is referenced by the segments. If they are different, engine open + // fails with TranslogCorruptedException. It is safe to create an empty translog for remote store enabled + // indices as a replica would only need to read the translog in a failover scenario and we always fetch data + // from the remote translog at the time of failover. + final SegmentInfos lastCommittedSegmentInfos = store().readLastCommittedSegmentsInfo(); + final String translogUUID = lastCommittedSegmentInfos.userData.get(TRANSLOG_UUID_KEY); + final long checkpoint = Long.parseLong(lastCommittedSegmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + Translog.createEmptyTranslog( + shardPath().resolveTranslog(), + shardId(), + checkpoint, + getPendingPrimaryTerm(), + translogUUID, + FileChannel::open + ); } } // we must create a new engine under mutex (see IndexShard#snapshotStoreMetadata). @@ -2934,10 +3039,24 @@ public void updateVisibleCheckpointForShard(final String allocationId, final Rep * @return {@link Tuple} V1 - TimeValue in ms - mean replication lag for this primary to its entire group, * V2 - Set of {@link SegmentReplicationShardStats} per shard in this primary's replication group. */ - public Set getReplicationStats() { + public Set getReplicationStatsForTrackedReplicas() { return replicationTracker.getSegmentReplicationStats(); } + public ReplicationStats getReplicationStats() { + if (indexSettings.isSegRepEnabled() && routingEntry().primary()) { + final Set stats = getReplicationStatsForTrackedReplicas(); + long maxBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).max().orElse(0L); + long totalBytesBehind = stats.stream().mapToLong(SegmentReplicationShardStats::getBytesBehindCount).sum(); + long maxReplicationLag = stats.stream() + .mapToLong(SegmentReplicationShardStats::getCurrentReplicationLagMillis) + .max() + .orElse(0L); + return new ReplicationStats(maxBytesBehind, totalBytesBehind, maxReplicationLag); + } + return new ReplicationStats(); + } + /** * Add a global checkpoint listener.
If the global checkpoint is equal to or above the global checkpoint the listener is waiting for, * then the listener will be notified immediately via an executor (so possibly not on the current thread). If the specified timeout @@ -3318,6 +3437,20 @@ assert getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingE synchronized (mutex) { replicationTracker.activateWithPrimaryContext(primaryContext); // make changes to primaryMode flag only under mutex } + postActivatePrimaryMode(); + } + + private void postActivatePrimaryMode() { + if (indexSettings.isRemoteStoreEnabled()) { + // We make sure to upload translog (even if it does not contain any operations) to remote translog. + // This helps to get a consistent state in remote store where both remote segment store and remote + // translog contains data. + try { + getEngine().syncTranslog(); + } catch (IOException e) { + logger.error("Failed to sync translog to remote from new primary", e); + } + } ensurePeerRecoveryRetentionLeasesExist(); } @@ -3462,6 +3595,7 @@ public void startRecovery( // } // }} // } + logger.debug("startRecovery type={}", recoveryState.getRecoverySource().getType()); assert recoveryState.getRecoverySource().equals(shardRouting.recoverySource()); switch (recoveryState.getRecoverySource().getType()) { case EMPTY_STORE: @@ -3705,16 +3839,15 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro internalRefreshListener.add( new RemoteStoreRefreshListener( this, - // Add the checkpoint publisher if the Segment Replciation via remote store is enabled. - indexSettings.isSegRepWithRemoteEnabled() ? this.checkpointPublisher : SegmentReplicationCheckpointPublisher.EMPTY, - remoteStorePressureService.getRemoteRefreshSegmentTracker(shardId()) + this.checkpointPublisher, + remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId()) ) ); } - /** - * With segment replication enabled for primary relocation, recover replica shard initially as read only and - * change to a writeable engine during relocation handoff after a round of segment replication. + /* + With segment replication enabled for primary relocation, recover replica shard initially as read only and + change to a writeable engine during relocation handoff after a round of segment replication. */ boolean isReadOnlyReplica = indexSettings.isSegRepEnabled() && (shardRouting.primary() == false @@ -3726,7 +3859,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro indexSettings, warmer, store, - indexSettings.getMergePolicy(), + indexSettings.getMergePolicy(isTimeSeriesIndex), mapperService != null ? mapperService.indexAnalyzer() : null, similarityService.similarity(mapperService), engineConfigFactory.newCodecServiceOrDefault(indexSettings, mapperService, logger, codecService), @@ -3747,7 +3880,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro replicationTracker::isPrimaryMode, translogFactorySupplier.apply(indexSettings, shardRouting), isTimeSeriesDescSortOptimizationEnabled() ? 
DataStream.TIMESERIES_LEAF_SORTER : null // DESC @timestamp default order for - // timeseries + // timeseries ); } @@ -4124,6 +4257,8 @@ private static AsyncIOProcessor createTranslogSyncProcessor( boolean bufferAsyncIoProcessor, Supplier bufferIntervalSupplier ) { + assert bufferAsyncIoProcessor == false || Objects.nonNull(bufferIntervalSupplier) + : "If bufferAsyncIoProcessor is true, then the bufferIntervalSupplier needs to be non null"; ThreadContext threadContext = threadPool.getThreadContext(); CheckedConsumer>>, IOException> writeConsumer = candidates -> { try { @@ -4348,15 +4483,19 @@ public final boolean isSearchIdle() { } /** - * * Returns true if this shard supports search idle. - * + * <p>

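(A standalone paraphrase of the search-idle rule described in this javadoc and implemented just below; a sketch for readability, not the shipped method.)

// Standalone paraphrase of the search-idle support rule.
final class SearchIdleRule {
    static boolean isSearchIdleSupported(boolean remoteTranslogEnabled, boolean segRepEnabled, int numberOfReplicas) {
        if (remoteTranslogEnabled) {
            // Keep the async refresh task running so periodic uploads to the remote store continue.
            return false;
        }
        return segRepEnabled == false || numberOfReplicas == 0;
    }

    public static void main(String[] args) {
        System.out.println(isSearchIdleSupported(true, false, 1));  // false: remote translog backed
        System.out.println(isSearchIdleSupported(false, true, 2));  // false: segrep with replicas
        System.out.println(isSearchIdleSupported(false, true, 0));  // true: segrep, no replicas
        System.out.println(isSearchIdleSupported(false, false, 2)); // true: docrep
    }
}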
              * Indices using Segment Replication will ignore search idle unless there are no replicas. * Primary shards push out new segments only * after a refresh, so we don't want to wait for a search to trigger that cycle. Replicas will only refresh after receiving * a new set of segments. */ public final boolean isSearchIdleSupported() { + // If the index is remote store backed, then search idle is not supported. This is to ensure that async refresh + // task continues to upload to remote store periodically. + if (isRemoteTranslogEnabled()) { + return false; + } return indexSettings.isSegRepEnabled() == false || indexSettings.getNumberOfReplicas() == 0; } @@ -4511,6 +4650,9 @@ public void beforeRefresh() throws IOException {} @Override public void afterRefresh(boolean didRefresh) throws IOException { if (didRefresh) { + // We're only starting to track the replication checkpoint. The timers for replication are started when + // the checkpoint is published. This is done so that the timers do not include the time spent by primary + // in uploading the segments to remote store. updateReplicationCheckpoint(); } } @@ -4593,6 +4735,16 @@ public GatedCloseable acquireSafeIndexCommit() { } } + @Override + public GatedCloseable getSegmentInfosSnapshot() { + synchronized (engineMutex) { + if (newEngineReference.get() == null) { + throw new AlreadyClosedException("engine was closed"); + } + return newEngineReference.get().getSegmentInfosSnapshot(); + } + } + @Override public void close() throws IOException { assert Thread.holdsLock(engineMutex); @@ -4607,7 +4759,7 @@ public void close() throws IOException { }; IOUtils.close(currentEngineReference.getAndSet(readOnlyEngine)); if (indexSettings.isRemoteStoreEnabled()) { - syncSegmentsFromRemoteSegmentStore(false, true); + syncSegmentsFromRemoteSegmentStore(false); } if (indexSettings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { syncRemoteTranslogAndUpdateGlobalCheckpoint(); @@ -4663,12 +4815,23 @@ public void syncTranslogFilesFromRemoteTranslog() throws IOException { } /** - * Downloads segments from remote segment store. - * @param overrideLocal flag to override local segment files with those in remote store - * @param refreshLevelSegmentSync last refresh checkpoint is used if true, commit checkpoint otherwise - * @throws IOException if exception occurs while reading segments from remote store + * Downloads segments from remote segment store + * @param overrideLocal flag to override local segment files with those in remote store. + * @throws IOException if exception occurs while reading segments from remote store. + */ + public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal) throws IOException { + syncSegmentsFromRemoteSegmentStore(overrideLocal, () -> {}); + } + + /** + * Downloads segments from remote segment store along with updating the access time of the recovery target. + * @param overrideLocal flag to override local segment files with those in remote store. + * @param onFileSync runnable that updates the access time when run. + * @throws IOException if exception occurs while reading segments from remote store. 
*/ - public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, boolean refreshLevelSegmentSync) throws IOException { + public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, final Runnable onFileSync) throws IOException { + boolean syncSegmentSuccess = false; + long startTimeMs = System.currentTimeMillis(); assert indexSettings.isRemoteStoreEnabled(); logger.trace("Downloading segments from remote segment store"); RemoteSegmentStoreDirectory remoteDirectory = getRemoteDirectory(); @@ -4680,9 +4843,7 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, boolean re .getSegmentsUploadedToRemoteStore() .entrySet() .stream() - // if this is a refresh level sync, ignore any segments_n uploaded to the store, we will commit the received infos bytes - // locally. - .filter(entry -> refreshLevelSegmentSync && entry.getKey().startsWith(IndexFileNames.SEGMENTS) == false) + .filter(entry -> entry.getKey().startsWith(IndexFileNames.SEGMENTS) == false) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); store.incRef(); remoteStore.incRef(); @@ -4701,9 +4862,9 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, boolean re } else { storeDirectory = store.directory(); } - copySegmentFiles(storeDirectory, remoteDirectory, null, uploadedSegments, overrideLocal); + copySegmentFiles(storeDirectory, remoteDirectory, null, uploadedSegments, overrideLocal, onFileSync); - if (refreshLevelSegmentSync && remoteSegmentMetadata != null) { + if (remoteSegmentMetadata != null) { final SegmentInfos infosSnapshot = store.buildSegmentInfos( remoteSegmentMetadata.getSegmentInfosBytes(), remoteSegmentMetadata.getGeneration() @@ -4720,9 +4881,15 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, boolean re : "There should not be any segments file in the dir"; store.commitSegmentInfos(infosSnapshot, processedLocalCheckpoint, processedLocalCheckpoint); } + syncSegmentSuccess = true; } catch (IOException e) { throw new IndexShardRecoveryException(shardId, "Exception while copying segment files from remote segment store", e); } finally { + logger.trace( + "syncSegmentsFromRemoteSegmentStore success={} elapsedTime={}", + syncSegmentSuccess, + (System.currentTimeMillis() - startTimeMs) + ); store.decRef(); remoteStore.decRef(); } @@ -4750,8 +4917,7 @@ public void syncSegmentsFromGivenRemoteSegmentStore( remoteStore.incRef(); } Map uploadedSegments = sourceRemoteDirectory - .initializeToSpecificCommit(primaryTerm, commitGeneration) - .getMetadata(); + .getSegmentsUploadedToRemoteStore(); final Directory storeDirectory = store.directory(); store.incRef(); @@ -4761,7 +4927,8 @@ public void syncSegmentsFromGivenRemoteSegmentStore( sourceRemoteDirectory, remoteDirectory, uploadedSegments, - overrideLocal + overrideLocal, + () -> {} ); if (segmentsNFile != null) { try ( @@ -4794,42 +4961,51 @@ private String copySegmentFiles( RemoteSegmentStoreDirectory sourceRemoteDirectory, RemoteSegmentStoreDirectory targetRemoteDirectory, Map uploadedSegments, - boolean overrideLocal + boolean overrideLocal, + final Runnable onFileSync ) throws IOException { - List downloadedSegments = new ArrayList<>(); - List skippedSegments = new ArrayList<>(); + Set toDownloadSegments = new HashSet<>(); + Set skippedSegments = new HashSet<>(); String segmentNFile = null; + try { - Set localSegmentFiles = Sets.newHashSet(storeDirectory.listAll()); if (overrideLocal) { - for (String file : localSegmentFiles) { + for (String file : storeDirectory.listAll()) { 
storeDirectory.deleteFile(file); } } + for (String file : uploadedSegments.keySet()) { long checksum = Long.parseLong(uploadedSegments.get(file).getChecksum()); if (overrideLocal || localDirectoryContains(storeDirectory, file, checksum) == false) { - storeDirectory.copyFrom(sourceRemoteDirectory, file, file, IOContext.DEFAULT); - downloadedSegments.add(file); + toDownloadSegments.add(file); } else { skippedSegments.add(file); } - if (targetRemoteDirectory != null) { - targetRemoteDirectory.copyFrom(storeDirectory, file, file, IOContext.DEFAULT); - } + if (file.startsWith(IndexFileNames.SEGMENTS)) { assert segmentNFile == null : "There should be only one SegmentInfosSnapshot file"; segmentNFile = file; } } + + if (toDownloadSegments.isEmpty() == false) { + try { + fileDownloader.download(sourceRemoteDirectory, storeDirectory, targetRemoteDirectory, toDownloadSegments, onFileSync); + } catch (Exception e) { + throw new IOException("Error occurred when downloading segments from remote store", e); + } + } } finally { - logger.trace("Downloaded segments here: {}", downloadedSegments); + logger.trace("Downloaded segments here: {}", toDownloadSegments); logger.trace("Skipped download for segments here: {}", skippedSegments); } + return segmentNFile; } - private boolean localDirectoryContains(Directory localDirectory, String file, long checksum) { + // Visible for testing + boolean localDirectoryContains(Directory localDirectory, String file, long checksum) throws IOException { try (IndexInput indexInput = localDirectory.openInput(file, IOContext.DEFAULT)) { if (checksum == CodecUtil.retrieveChecksum(indexInput)) { return true; @@ -4848,6 +5024,8 @@ private boolean localDirectoryContains(Directory localDirectory, String file, lo logger.debug("File {} does not exist in local FS, downloading from remote store", file); } catch (IOException e) { logger.warn("Exception while reading checksum of file: {}, this can happen if file is corrupted", file); + // For any other exception on reading checksum, we delete the file to re-download again + localDirectory.deleteFile(file); } return false; } @@ -4906,4 +5084,17 @@ RetentionLeaseSyncer getRetentionLeaseSyncer() { public GatedCloseable getSegmentInfosSnapshot() { return getEngine().getSegmentInfosSnapshot(); } + + private TimeValue getRemoteTranslogUploadBufferInterval(Supplier clusterRemoteTranslogBufferIntervalSupplier) { + assert Objects.nonNull(clusterRemoteTranslogBufferIntervalSupplier) : "remote translog buffer interval supplier is null"; + if (indexSettings().isRemoteTranslogBufferIntervalExplicit()) { + return indexSettings().getRemoteTranslogUploadBufferInterval(); + } + return clusterRemoteTranslogBufferIntervalSupplier.get(); + } + + // Exclusively for testing, please do not use it elsewhere. 
+ public AsyncIOProcessor getTranslogSyncProcessor() { + return translogSyncProcessor; + } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java index 8953ef38da51b..89cbc59403faf 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexingStats.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexingStats.java @@ -37,6 +37,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -44,6 +45,7 @@ import java.io.IOException; import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; /** * Tracks indexing statistics @@ -59,6 +61,89 @@ public class IndexingStats implements Writeable, ToXContentFragment { */ public static class Stats implements Writeable, ToXContentFragment { + /** + * Tracks item level rest category class codes during indexing + * + * @opensearch.internal + */ + public static class DocStatusStats implements Writeable, ToXContentFragment { + + final AtomicLong[] docStatusCounter; + + public DocStatusStats() { + docStatusCounter = new AtomicLong[5]; + for (int i = 0; i < docStatusCounter.length; ++i) { + docStatusCounter[i] = new AtomicLong(0); + } + } + + public DocStatusStats(StreamInput in) throws IOException { + docStatusCounter = in.readArray(i -> new AtomicLong(i.readLong()), AtomicLong[]::new); + + assert docStatusCounter.length == 5 : "Length of incoming array should be 5! Got " + docStatusCounter.length; + } + + /** + * Increment counter for status + * + * @param status {@link RestStatus} + */ + public void inc(final RestStatus status) { + add(status, 1L); + } + + /** + * Increment counter for status by count + * + * @param status {@link RestStatus} + * @param delta The value to add + */ + void add(final RestStatus status, final long delta) { + docStatusCounter[status.getStatusFamilyCode() - 1].addAndGet(delta); + } + + /** + * Accumulate stats from the passed Object + * + * @param stats Instance storing {@link DocStatusStats} + */ + public void add(final DocStatusStats stats) { + if (null == stats) { + return; + } + + for (int i = 0; i < docStatusCounter.length; ++i) { + docStatusCounter[i].addAndGet(stats.docStatusCounter[i].longValue()); + } + } + + public AtomicLong[] getDocStatusCounter() { + return docStatusCounter; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.DOC_STATUS); + + for (int i = 0; i < docStatusCounter.length; ++i) { + long value = docStatusCounter[i].longValue(); + + if (value > 0) { + String key = i + 1 + "xx"; + builder.field(key, value); + } + } + + return builder.endObject(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeArray((o, v) -> o.writeLong(v.longValue()), docStatusCounter); + } + + } + private long indexCount; private long indexTimeInMillis; private long indexCurrent; @@ -69,8 +154,11 @@ public static class Stats implements Writeable, ToXContentFragment { private long noopUpdateCount; private long throttleTimeInMillis; private boolean isThrottled; + private final DocStatusStats docStatusStats; - Stats() {} + Stats() { + docStatusStats = new DocStatusStats(); 
+ } public Stats(StreamInput in) throws IOException { indexCount = in.readVLong(); @@ -83,6 +171,12 @@ public Stats(StreamInput in) throws IOException { noopUpdateCount = in.readVLong(); isThrottled = in.readBoolean(); throttleTimeInMillis = in.readLong(); + + if (in.getVersion().onOrAfter(Version.V_2_11_0)) { + docStatusStats = in.readOptionalWriteable(DocStatusStats::new); + } else { + docStatusStats = null; + } } public Stats( @@ -95,7 +189,8 @@ public Stats( long deleteCurrent, long noopUpdateCount, boolean isThrottled, - long throttleTimeInMillis + long throttleTimeInMillis, + DocStatusStats docStatusStats ) { this.indexCount = indexCount; this.indexTimeInMillis = indexTimeInMillis; @@ -107,6 +202,7 @@ public Stats( this.noopUpdateCount = noopUpdateCount; this.isThrottled = isThrottled; this.throttleTimeInMillis = throttleTimeInMillis; + this.docStatusStats = docStatusStats; } public void add(Stats stats) { @@ -121,8 +217,10 @@ public void add(Stats stats) { noopUpdateCount += stats.noopUpdateCount; throttleTimeInMillis += stats.throttleTimeInMillis; - if (isThrottled != stats.isThrottled) { - isThrottled = true; // When combining if one is throttled set result to throttled. + isThrottled |= stats.isThrottled; // When combining if one is throttled set result to throttled. + + if (getDocStatusStats() != null) { + getDocStatusStats().add(stats.getDocStatusStats()); } } @@ -193,6 +291,10 @@ public long getNoopUpdateCount() { return noopUpdateCount; } + public DocStatusStats getDocStatusStats() { + return docStatusStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(indexCount); @@ -206,6 +308,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isThrottled); out.writeLong(throttleTimeInMillis); + if (out.getVersion().onOrAfter(Version.V_2_11_0)) { + out.writeOptionalWriteable(docStatusStats); + } } @Override @@ -223,8 +328,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(Fields.IS_THROTTLED, isThrottled); builder.humanReadableField(Fields.THROTTLED_TIME_IN_MILLIS, Fields.THROTTLED_TIME, getThrottleTime()); + + if (getDocStatusStats() != null) { + getDocStatusStats().toXContent(builder, params); + } + return builder; } + } private final Stats totalStats; @@ -279,7 +390,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par * * @opensearch.internal */ - static final class Fields { + private static final class Fields { static final String INDEXING = "indexing"; static final String INDEX_TOTAL = "index_total"; static final String INDEX_TIME = "index_time"; @@ -294,6 +405,7 @@ static final class Fields { static final String IS_THROTTLED = "is_throttled"; static final String THROTTLED_TIME_IN_MILLIS = "throttle_time_in_millis"; static final String THROTTLED_TIME = "throttle_time"; + static final String DOC_STATUS = "doc_status"; } @Override @@ -303,4 +415,5 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } } + } diff --git a/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java b/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java index d7e15dd3e40f5..55b65bb4be6d8 100644 --- a/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java +++ b/server/src/main/java/org/opensearch/index/shard/InternalIndexingStats.java @@ -154,7 +154,8 @@ IndexingStats.Stats stats(boolean isThrottled, long currentThrottleMillis) { deleteCurrent.count(), 
noopUpdates.count(), isThrottled, - TimeUnit.MILLISECONDS.toMillis(currentThrottleMillis) + TimeUnit.MILLISECONDS.toMillis(currentThrottleMillis), + new IndexingStats.Stats.DocStatusStats() ); } } diff --git a/server/src/main/java/org/opensearch/index/shard/RefreshListeners.java b/server/src/main/java/org/opensearch/index/shard/RefreshListeners.java index 7dbbcbb2d7d20..803db773efe6c 100644 --- a/server/src/main/java/org/opensearch/index/shard/RefreshListeners.java +++ b/server/src/main/java/org/opensearch/index/shard/RefreshListeners.java @@ -54,7 +54,7 @@ /** * Allows for the registration of listeners that are called when a change becomes visible for search. This functionality is exposed from * {@link IndexShard} but kept here so it can be tested without standing up the entire thing. - * + * <p>

* When {@link Closeable#close()}d it will no longer accept listeners and will flush any existing listeners. * * @opensearch.internal */ @@ -86,7 +86,7 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener, * List of refresh listeners. Defaults to null and built on demand because most refresh cycles won't need it. Entries are never removed * from it, rather, it is nulled and rebuilt when needed again. The (hopefully) rare entries that didn't make the current refresh cycle * are just added back to the new list. Both the reference and the contents are always modified while synchronized on {@code this}. - * + * <p>

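(A minimal sketch of the null-and-rebuild pattern this comment describes; ListenerRegistry and its members are invented names. Listeners are collected under the lock, the list reference is swapped to null when they fire, and a fresh list is built on the next registration.)

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

// Illustrative null-and-rebuild listener registry, loosely mirroring the pattern above.
final class ListenerRegistry<T> {
    // Built on demand; nulled out each time the pending listeners are fired.
    private volatile List<Consumer<T>> listeners = null;

    synchronized void add(Consumer<T> listener) {
        if (listeners == null) {
            listeners = new ArrayList<>(); // rebuild lazily; most cycles never register a listener
        }
        listeners.add(listener);
    }

    void fire(T event) {
        List<Consumer<T>> toFire;
        synchronized (this) {
            toFire = listeners;
            listeners = null; // swap out under the lock, notify outside of it
        }
        if (toFire != null) {
            toFire.forEach(l -> l.accept(event));
        }
    }
}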
* We never set this to non-null while closed is {@code true}. */ private volatile List>> refreshListeners = null; diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 05dba0e2f1c1c..b9b994c413d10 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -20,6 +20,7 @@ import org.opensearch.action.LatchedActionListener; import org.opensearch.action.bulk.BackoffPolicy; import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.logging.Loggers; import org.opensearch.common.unit.TimeValue; @@ -86,10 +87,9 @@ public final class RemoteStoreRefreshListener extends CloseableRetryableRefreshL private final RemoteSegmentStoreDirectory remoteDirectory; private final RemoteSegmentTransferTracker segmentTracker; private final Map localSegmentChecksumMap; - private long primaryTerm; + private volatile long primaryTerm; private volatile Iterator backoffDelayIterator; private final SegmentReplicationCheckpointPublisher checkpointPublisher; - private final UploadListener statsListener; public RemoteStoreRefreshListener( IndexShard indexShard, @@ -117,26 +117,6 @@ public RemoteStoreRefreshListener( this.segmentTracker = segmentTracker; resetBackOffDelayIterator(); this.checkpointPublisher = checkpointPublisher; - this.statsListener = new UploadListener() { - @Override - public void beforeUpload(String file) { - // Start tracking the upload bytes started - segmentTracker.addUploadBytesStarted(segmentTracker.getLatestLocalFileNameLengthMap().get(file)); - } - - @Override - public void onSuccess(String file) { - // Track upload success - segmentTracker.addUploadBytesSucceeded(segmentTracker.getLatestLocalFileNameLengthMap().get(file)); - segmentTracker.addToLatestUploadedFiles(file); - } - - @Override - public void onFailure(String file) { - // Track upload failure - segmentTracker.addUploadBytesFailed(segmentTracker.getLatestLocalFileNameLengthMap().get(file)); - } - }; } @Override @@ -144,13 +124,12 @@ public void beforeRefresh() throws IOException {} @Override protected void runAfterRefreshExactlyOnce(boolean didRefresh) { - if (shouldSync(didRefresh)) { - segmentTracker.updateLocalRefreshTimeAndSeqNo(); + // We have 2 separate methods to check whether a sync is needed. This is required since we use the boolean returned + // by isReadyForUpload to schedule refresh retries while the index shard or the primary mode is not yet in a complete + // ready state.
+ if (shouldSync(didRefresh, true) && isReadyForUpload()) { try { - if (this.primaryTerm != indexShard.getOperationPrimaryTerm()) { - this.primaryTerm = indexShard.getOperationPrimaryTerm(); - this.remoteDirectory.init(); - } + segmentTracker.updateLocalRefreshTimeAndSeqNo(); try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { Collection localSegmentsPostRefresh = segmentInfosGatedCloseable.get().files(true); updateLocalSizeMapAndTracker(localSegmentsPostRefresh); @@ -171,9 +150,7 @@ protected void runAfterRefreshExactlyOnce(boolean didRefresh) { @Override protected boolean performAfterRefreshWithPermit(boolean didRefresh) { boolean successful; - // The third condition exists for uploading the zero state segments where the refresh has not changed the reader reference, but it - // is important to upload the zero state segments so that the restore does not break. - if (shouldSync(didRefresh)) { + if (shouldSync(didRefresh, false)) { successful = syncSegments(); } else { successful = true; @@ -181,22 +158,40 @@ protected boolean performAfterRefreshWithPermit(boolean didRefresh) { return successful; } - private boolean shouldSync(boolean didRefresh) { - return this.primaryTerm != indexShard.getOperationPrimaryTerm() - || didRefresh - || remoteDirectory.getSegmentsUploadedToRemoteStore().isEmpty(); + /** + * This checks if a sync to the remote store is required. + * + * @param didRefresh if the readers changed. + * @param skipPrimaryTermCheck whether to skip considering a change in primary term when deciding if a sync is needed + * @return true if sync is needed + */ + private boolean shouldSync(boolean didRefresh, boolean skipPrimaryTermCheck) { + boolean shouldSync = didRefresh // If the readers change, didRefresh is always true. + // The third condition exists for uploading the zero state segments where the refresh has not changed the reader + // reference, but it is important to upload the zero state segments so that the restore does not break. + || remoteDirectory.getSegmentsUploadedToRemoteStore().isEmpty() + // When shouldSync is called the first time, the primary term condition below is true. But after that + // we update the primary term and the same condition would not evaluate to true again in syncSegments. + // The check below ensures that a commit gets picked up by both the 1st and the 2nd shouldSync call. + || isRefreshAfterCommitSafe(); + if (shouldSync || skipPrimaryTermCheck) { + return shouldSync; + } + return this.primaryTerm != indexShard.getOperationPrimaryTerm(); } + /* + @return false if retry is needed + */ private boolean syncSegments() { - if (indexShard.getReplicationTracker().isPrimaryMode() == false || indexShard.state() == IndexShardState.CLOSED) { - logger.trace( - "Skipped syncing segments with primaryMode={} indexShardState={}", - indexShard.getReplicationTracker().isPrimaryMode(), - indexShard.state() - ); - return true; + if (isReadyForUpload() == false) { + // The following check is required to enable retries and make sure that we do not lose this refresh event. + // When primary shard is restored from remote store, the recovery happens first followed by changing + // primaryMode to true. Due to this, the refresh that is triggered post replay of translog will not go through + // if the following condition does not exist. The segments created as part of translog replay will not be present + // in the remote store.
+ return indexShard.state() != IndexShardState.STARTED || !(indexShard.getEngine() instanceof InternalEngine); } - ReplicationCheckpoint checkpoint = indexShard.getLatestReplicationCheckpoint(); beforeSegmentsSync(); long refreshTimeMs = segmentTracker.getLocalRefreshTimeMs(), refreshClockTimeMs = segmentTracker.getLocalRefreshClockTimeMs(); long refreshSeqNo = segmentTracker.getLocalRefreshSeqNo(); @@ -205,6 +200,7 @@ private boolean syncSegments() { try { try { + initializeRemoteDirectoryOnTermUpdate(); // if a new segments_N file is present in local that is not uploaded to remote store yet, it // is considered as a first refresh post commit. A cleanup of stale commit files is triggered. // This is done to avoid delete post each refresh. @@ -214,10 +210,7 @@ private boolean syncSegments() { try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); - assert segmentInfos.getGeneration() == checkpoint.getSegmentsGen() : "SegmentInfos generation: " - + segmentInfos.getGeneration() - + " does not match metadata generation: " - + checkpoint.getSegmentsGen(); + final ReplicationCheckpoint checkpoint = indexShard.computeReplicationCheckpoint(segmentInfos); // Capture replication checkpoint before uploading the segments as upload can take some time and checkpoint can // move. long lastRefreshedCheckpoint = ((InternalEngine) indexShard.getEngine()).lastRefreshedCheckpoint(); @@ -230,8 +223,10 @@ private boolean syncSegments() { @Override public void onResponse(Void unused) { try { + logger.debug("New segments upload successful"); // Start metadata file upload uploadMetadata(localSegmentsPostRefresh, segmentInfos, checkpoint); + logger.debug("Metadata upload successful"); clearStaleFilesFromLocalSegmentChecksumMap(localSegmentsPostRefresh); onSuccessfulSegmentsSync( refreshTimeMs, @@ -275,6 +270,7 @@ public void onFailure(Exception e) { updateFinalStatusInSegmentTracker(successful.get(), bytesBeforeUpload, startTimeInNS); // If there are failures in uploading segments, then we should retry as search idle can lead to // refresh not occurring until write happens. + logger.debug("syncSegments runStatus={}", successful.get()); return successful.get(); } @@ -313,6 +309,7 @@ private void onSuccessfulSegmentsSync( ((InternalEngine) indexShard.getEngine()).translogManager().setMinSeqNoToKeep(lastRefreshedCheckpoint + 1); // Publishing the new checkpoint which is used for remote store + segrep indexes checkpointPublisher.publish(indexShard, checkpoint); + logger.debug("onSuccessfulSegmentsSync lastRefreshedCheckpoint={} checkpoint={}", lastRefreshedCheckpoint, checkpoint); } /** @@ -338,6 +335,19 @@ private boolean isRefreshAfterCommit() throws IOException { && !remoteDirectory.containsFile(lastCommittedLocalSegmentFileName, getChecksumOfLocalFile(lastCommittedLocalSegmentFileName))); } + /** + * Returns if the current refresh has happened after a commit. + * @return true if this refresh has happened on account of a commit. If otherwise or exception, returns false. 
+ */ + private boolean isRefreshAfterCommitSafe() { + try { + return isRefreshAfterCommit(); + } catch (Exception e) { + logger.info("Exception occurred in isRefreshAfterCommitSafe", e); + } + return false; + } + void uploadMetadata(Collection localSegmentsPostRefresh, SegmentInfos segmentInfos, ReplicationCheckpoint replicationCheckpoint) throws IOException { final long maxSeqNo = ((InternalEngine) indexShard.getEngine()).currentOngoingRefreshCheckpoint(); @@ -358,7 +368,8 @@ void uploadMetadata(Collection localSegmentsPostRefresh, SegmentInfos se segmentInfosSnapshot, storeDirectory, translogFileGeneration, - replicationCheckpoint + replicationCheckpoint, + indexShard.getNodeId() ); } } @@ -366,14 +377,18 @@ void uploadMetadata(Collection localSegmentsPostRefresh, SegmentInfos se private void uploadNewSegments(Collection localSegmentsPostRefresh, ActionListener listener) { Collection filteredFiles = localSegmentsPostRefresh.stream().filter(file -> !skipUpload(file)).collect(Collectors.toList()); if (filteredFiles.size() == 0) { + logger.debug("No new segments to upload in uploadNewSegments"); listener.onResponse(null); return; } + logger.debug("Effective new segments files to upload {}", filteredFiles); ActionListener> mappedListener = ActionListener.map(listener, resp -> null); GroupedActionListener batchUploadListener = new GroupedActionListener<>(mappedListener, filteredFiles.size()); for (String src : filteredFiles) { + // Initializing listener here to ensure that the stats increment operations are thread-safe + UploadListener statsListener = createUploadListener(); ActionListener aggregatedListener = ActionListener.wrap(resp -> { statsListener.onSuccess(src); batchUploadListener.onResponse(resp); @@ -442,14 +457,103 @@ private void updateFinalStatusInSegmentTracker(boolean uploadStatus, long bytesB long bytesUploaded = segmentTracker.getUploadBytesSucceeded() - bytesBeforeUpload; long timeTakenInMS = TimeValue.nsecToMSec(System.nanoTime() - startTimeInNS); segmentTracker.incrementTotalUploadsSucceeded(); - segmentTracker.addUploadBytes(bytesUploaded); - segmentTracker.addUploadBytesPerSec((bytesUploaded * 1_000L) / Math.max(1, timeTakenInMS)); - segmentTracker.addUploadTimeMs(timeTakenInMS); + segmentTracker.updateUploadBytesMovingAverage(bytesUploaded); + segmentTracker.updateUploadBytesPerSecMovingAverage((bytesUploaded * 1_000L) / Math.max(1, timeTakenInMS)); + segmentTracker.updateUploadTimeMovingAverage(timeTakenInMS); } else { segmentTracker.incrementTotalUploadsFailed(); } } + /** + * On primary term update, we (re)initialise the remote segment directory to reflect the latest metadata file that + * has been uploaded to remote store successfully. This method also updates the segment tracker about the latest + * uploaded segment files onto remote store. + */ + private void initializeRemoteDirectoryOnTermUpdate() throws IOException { + if (this.primaryTerm != indexShard.getOperationPrimaryTerm()) { + logger.trace("primaryTerm update from={} to={}", primaryTerm, indexShard.getOperationPrimaryTerm()); + this.primaryTerm = indexShard.getOperationPrimaryTerm(); + RemoteSegmentMetadata uploadedMetadata = this.remoteDirectory.init(); + + // During failover, the uploaded metadata would have names of files that have been uploaded to remote store. + // Here we update the tracker with latest remote uploaded files. 
+ if (uploadedMetadata != null) { + segmentTracker.setLatestUploadedFiles(uploadedMetadata.getMetadata().keySet()); + } + } + } + + /** + * This checks for readiness of the index shard and primary mode. This has been separated from shouldSync since we use the + * returned value of this method for scheduling retries in the syncSegments method. + * @return true iff primaryMode is true and index shard is not in closed state. + */ + private boolean isReadyForUpload() { + boolean isReady = (indexShard.getReplicationTracker().isPrimaryMode() && indexShard.state() != IndexShardState.CLOSED) + || isLocalOrSnapshotRecovery(); + + if (isReady == false) { + StringBuilder sb = new StringBuilder("Skipped syncing segments with"); + if (indexShard.getReplicationTracker() != null) { + sb.append(" primaryMode=").append(indexShard.getReplicationTracker().isPrimaryMode()); + } + if (indexShard.state() != null) { + sb.append(" indexShardState=").append(indexShard.state()); + } + if (indexShard.getEngineOrNull() != null) { + sb.append(" engineType=").append(indexShard.getEngine().getClass().getSimpleName()); + } + if (isLocalOrSnapshotRecovery() == false) { + sb.append(" recoverySourceType=").append(indexShard.recoveryState().getRecoverySource().getType()); + sb.append(" primary=").append(indexShard.shardRouting.primary()); + } + logger.trace(sb.toString()); + } + return isReady; + } + + private boolean isLocalOrSnapshotRecovery() { + // In this case when the primary mode is false, we need to upload segments to Remote Store + // This is required in case of snapshots/shrink/split/clone where we need to durably persist + // all segments to remote before completing the recovery to ensure durability. + + return (indexShard.state() == IndexShardState.RECOVERING && indexShard.shardRouting.primary()) + && (indexShard.recoveryState().getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS + || indexShard.recoveryState().getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT); + } + + /** + * Creates an {@link UploadListener} containing the stats population logic which would be triggered before and after segment upload events + */ + private UploadListener createUploadListener() { + return new UploadListener() { + private long uploadStartTime = 0; + + @Override + public void beforeUpload(String file) { + // Start tracking the upload bytes started + segmentTracker.addUploadBytesStarted(segmentTracker.getLatestLocalFileNameLengthMap().get(file)); + uploadStartTime = System.currentTimeMillis(); + } + + @Override + public void onSuccess(String file) { + // Track upload success + segmentTracker.addUploadBytesSucceeded(segmentTracker.getLatestLocalFileNameLengthMap().get(file)); + segmentTracker.addToLatestUploadedFiles(file); + segmentTracker.addUploadTimeInMillis(Math.max(1, System.currentTimeMillis() - uploadStartTime)); + } + + @Override + public void onFailure(String file) { + // Track upload failure + segmentTracker.addUploadBytesFailed(segmentTracker.getLatestLocalFileNameLengthMap().get(file)); + segmentTracker.addUploadTimeInMillis(Math.max(1, System.currentTimeMillis() - uploadStartTime)); + } + }; + } + @Override protected Logger getLogger() { return logger; diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index d0c083390ab70..5b1940bb1d9a5 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -191,6 +191,15
@@ void recoverFromLocalShards( // just trigger a merge to do housekeeping on the // copied segments - we will also see them in stats etc. indexShard.getEngine().forceMerge(false, -1, false, false, false, UUIDs.randomBase64UUID()); + if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.isRemoteSegmentStoreInSync() == false) { + throw new IndexShardRecoveryException( + indexShard.shardId(), + "failed to upload to remote", + new IOException("Failed to upload to remote segment store") + ); + } + } return true; } catch (IOException ex) { throw new IndexShardRecoveryException(indexShard.shardId(), "failed to recover from local shards", ex); @@ -399,7 +408,12 @@ void recoverFromSnapshotAndRemoteStore( RemoteSegmentStoreDirectory sourceRemoteDirectory = (RemoteSegmentStoreDirectory) directoryFactory.newDirectory( remoteStoreRepository, indexUUID, - String.valueOf(shardId.id()) + shardId + ); + sourceRemoteDirectory.initializeToSpecificCommit( + primaryTerm, + commitGeneration, + recoverySource.snapshot().getSnapshotId().getUUID() ); indexShard.syncSegmentsFromGivenRemoteSegmentStore(true, sourceRemoteDirectory, primaryTerm, commitGeneration); final Store store = indexShard.store(); @@ -418,6 +432,12 @@ void recoverFromSnapshotAndRemoteStore( } indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); + if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.isRemoteSegmentStoreInSync() == false) { + listener.onFailure(new IndexShardRestoreFailedException(shardId, "Failed to upload to remote segment store")); + return; + } + } indexShard.postRecovery("restore done"); listener.onResponse(true); @@ -535,11 +555,12 @@ private void recoverFromRemoteStore(IndexShard indexShard) throws IndexShardReco remoteStore.incRef(); try { // Download segments from remote segment store - indexShard.syncSegmentsFromRemoteSegmentStore(true, true); - + indexShard.syncSegmentsFromRemoteSegmentStore(true); indexShard.syncTranslogFilesFromRemoteTranslog(); - if (store.directory().listAll().length == 0) { + // On index creation, the only segment file that is created is segments_N. We can safely discard this file + // as there is no data associated with this shard as part of segments. 
+ if (store.directory().listAll().length <= 1) { Path location = indexShard.shardPath().resolveTranslog(); Checkpoint checkpoint = Checkpoint.read(location.resolve(CHECKPOINT_FILE_NAME)); final Path translogFile = location.resolve(Translog.getFilename(checkpoint.getGeneration())); @@ -696,6 +717,12 @@ private void restore( } indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); + if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.isRemoteSegmentStoreInSync() == false) { + listener.onFailure(new IndexShardRestoreFailedException(shardId, "Failed to upload to remote segment store")); + return; + } + } indexShard.postRecovery("restore done"); listener.onResponse(true); }, e -> listener.onFailure(new IndexShardRestoreFailedException(shardId, "restore failed", e))); diff --git a/server/src/main/java/org/opensearch/index/similarity/SimilarityProvider.java b/server/src/main/java/org/opensearch/index/similarity/SimilarityProvider.java index ad64f3a55228f..085e93c794fb7 100644 --- a/server/src/main/java/org/opensearch/index/similarity/SimilarityProvider.java +++ b/server/src/main/java/org/opensearch/index/similarity/SimilarityProvider.java @@ -70,23 +70,23 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; SimilarityProvider that = (SimilarityProvider) o; - /** - * We check name only because the similarity is - * re-created for each new instance and they don't implement equals. - * This is not entirely correct though but we only use equality checks - * for similarities inside the same index and names are unique in this case. - **/ + /* + We check name only because the similarity is + re-created for each new instance and they don't implement equals. + This is not entirely correct, but we only use equality checks + for similarities inside the same index and names are unique in this case. + */ return Objects.equals(name, that.name); } @Override public int hashCode() { - /** - * We use name only because the similarity is - * re-created for each new instance and they don't implement equals. - * This is not entirely correct though but we only use equality checks - * for similarities a single index and names are unique in this case. - **/ + /* + We use name only because the similarity is + re-created for each new instance and they don't implement equals. + This is not entirely correct, but we only use equality checks + for similarities in a single index and names are unique in this case.
+ */ return Objects.hash(name); } } diff --git a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RateLimitingInputStream.java b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RateLimitingInputStream.java index 86ecef1173e48..ee601f96ecee1 100644 --- a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RateLimitingInputStream.java +++ b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RateLimitingInputStream.java @@ -33,6 +33,7 @@ package org.opensearch.index.snapshots.blobstore; import org.apache.lucene.store.RateLimiter; +import org.opensearch.common.StreamLimiter; import java.io.FilterInputStream; import java.io.IOException; @@ -46,45 +47,17 @@ */ public class RateLimitingInputStream extends FilterInputStream { - private final Supplier rateLimiterSupplier; + private final StreamLimiter streamLimiter; - private final Listener listener; - - private long bytesSinceLastRateLimit; - - /** - * Internal listener - * - * @opensearch.internal - */ - public interface Listener { - void onPause(long nanos); - } - - public RateLimitingInputStream(InputStream delegate, Supplier rateLimiterSupplier, Listener listener) { + public RateLimitingInputStream(InputStream delegate, Supplier rateLimiterSupplier, StreamLimiter.Listener listener) { super(delegate); - this.rateLimiterSupplier = rateLimiterSupplier; - this.listener = listener; - } - - private void maybePause(int bytes) throws IOException { - bytesSinceLastRateLimit += bytes; - final RateLimiter rateLimiter = rateLimiterSupplier.get(); - if (rateLimiter != null) { - if (bytesSinceLastRateLimit >= rateLimiter.getMinPauseCheckBytes()) { - long pause = rateLimiter.pause(bytesSinceLastRateLimit); - bytesSinceLastRateLimit = 0; - if (pause > 0) { - listener.onPause(pause); - } - } - } + this.streamLimiter = new StreamLimiter(rateLimiterSupplier, listener); } @Override public int read() throws IOException { int b = super.read(); - maybePause(1); + streamLimiter.maybePause(1); return b; } @@ -92,7 +65,7 @@ public int read() throws IOException { public int read(byte[] b, int off, int len) throws IOException { int n = super.read(b, off, len); if (n > 0) { - maybePause(n); + streamLimiter.maybePause(n); } return n; } diff --git a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java index eefc1469a06a0..aa5d90cc65803 100644 --- a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java +++ b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java @@ -322,10 +322,10 @@ public String snapshot() { return snapshot; } - /** - * Returns list of files in the shard - * - * @return list of files + /* + Returns list of files in the shard + + @return list of files */ /** diff --git a/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java b/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java index 5e12517becaf2..7ad48cb56a33b 100644 --- a/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java @@ -8,6 +8,7 @@ package org.opensearch.index.store; +import org.apache.lucene.store.Directory; import org.opensearch.common.util.MovingAverage; import org.opensearch.core.common.io.stream.StreamInput; import 
org.opensearch.core.common.io.stream.StreamOutput; @@ -15,128 +16,150 @@ import java.io.IOException; import java.util.Objects; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; /** - * Tracks the amount of bytes transferred between two {@link org.apache.lucene.store.Directory} instances + * Tracks the amount of bytes transferred between two {@link Directory} instances * * @opensearch.internal */ public class DirectoryFileTransferTracker { /** - * Cumulative size of files (in bytes) attempted to be transferred over from the source {@link org.apache.lucene.store.Directory} + * Cumulative size of files (in bytes) attempted to be transferred over from the source {@link Directory} */ - private volatile long transferredBytesStarted; + private final AtomicLong transferredBytesStarted = new AtomicLong(); /** - * Cumulative size of files (in bytes) successfully transferred over from the source {@link org.apache.lucene.store.Directory} + * Cumulative size of files (in bytes) failed in transfer over from the source {@link Directory} */ - private volatile long transferredBytesFailed; + private final AtomicLong transferredBytesFailed = new AtomicLong(); /** - * Cumulative size of files (in bytes) failed in transfer over from the source {@link org.apache.lucene.store.Directory} + * Cumulative size of files (in bytes) successfully transferred over from the source {@link Directory} */ - private volatile long transferredBytesSucceeded; + private final AtomicLong transferredBytesSucceeded = new AtomicLong(); /** - * Time in milliseconds for the last successful transfer from the source {@link org.apache.lucene.store.Directory} + * Time in milliseconds for the last successful transfer from the source {@link Directory} */ - private volatile long lastTransferTimestampMs; + private final AtomicLong lastTransferTimestampMs = new AtomicLong(); /** - * Provides moving average over the last N total size in bytes of files transferred from the source {@link org.apache.lucene.store.Directory}. + * Cumulative time in milliseconds spent in successful transfers from the source {@link Directory} + */ + private final AtomicLong totalTransferTimeInMs = new AtomicLong(); + + /** + * Provides moving average over the last N total size in bytes of files transferred from the source {@link Directory}.
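(A minimal fixed-window moving average in the spirit of the tracker above; a sketch only, since the real org.opensearch.common.util.MovingAverage may differ in detail.)

// Fixed-window moving average: O(1) record() using a ring buffer and a running sum.
final class WindowedAverage {
    private final long[] window;
    private long sum;
    private long count; // total samples seen

    WindowedAverage(int windowSize) {
        this.window = new long[windowSize];
    }

    synchronized void record(long sample) {
        int slot = (int) (count % window.length);
        sum += sample - window[slot]; // evict the oldest sample from the running sum
        window[slot] = sample;
        count++;
    }

    synchronized boolean isReady() {
        return count >= window.length; // the window has been filled at least once
    }

    synchronized double getAverage() {
        long n = Math.min(count, window.length);
        return n == 0 ? 0.0 : (double) sum / n;
    }
}

Feeding it per-transfer throughput samples, e.g. record((size * 1_000L) / Math.max(1, timeTakenInMS)), mirrors the bytes-per-second computation used by addTransferredBytesSucceeded.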
* N is window size */ - private volatile MovingAverage transferredBytesPerSecMovingAverageReference; + private final AtomicReference transferredBytesPerSecMovingAverageReference; private final int DIRECTORY_FILES_TRANSFER_DEFAULT_WINDOW_SIZE = 20; + // Getters and Setters, all are visible for testing public long getTransferredBytesStarted() { - return transferredBytesStarted; + return transferredBytesStarted.get(); } public void addTransferredBytesStarted(long size) { - transferredBytesStarted += size; + transferredBytesStarted.getAndAdd(size); } public long getTransferredBytesFailed() { - return transferredBytesFailed; + return transferredBytesFailed.get(); } - public void addTransferredBytesFailed(long size) { - transferredBytesFailed += size; + public void addTransferredBytesFailed(long size, long startTimeInMs) { + transferredBytesFailed.getAndAdd(size); + addTotalTransferTimeInMs(Math.max(1, System.currentTimeMillis() - startTimeInMs)); } public long getTransferredBytesSucceeded() { - return transferredBytesSucceeded; + return transferredBytesSucceeded.get(); } public void addTransferredBytesSucceeded(long size, long startTimeInMs) { - transferredBytesSucceeded += size; - updateLastSuccessfulTransferSize(size); + transferredBytesSucceeded.getAndAdd(size); + updateSuccessfulTransferSize(size); long currentTimeInMs = System.currentTimeMillis(); updateLastTransferTimestampMs(currentTimeInMs); long timeTakenInMS = Math.max(1, currentTimeInMs - startTimeInMs); + addTotalTransferTimeInMs(timeTakenInMS); addTransferredBytesPerSec((size * 1_000L) / timeTakenInMS); } public boolean isTransferredBytesPerSecAverageReady() { - return transferredBytesPerSecMovingAverageReference.isReady(); + return transferredBytesPerSecMovingAverageReference.get().isReady(); } public double getTransferredBytesPerSecAverage() { - return transferredBytesPerSecMovingAverageReference.getAverage(); + return transferredBytesPerSecMovingAverageReference.get().getAverage(); } - // Visible for testing public void addTransferredBytesPerSec(long bytesPerSec) { - this.transferredBytesPerSecMovingAverageReference.record(bytesPerSec); + this.transferredBytesPerSecMovingAverageReference.get().record(bytesPerSec); } public boolean isTransferredBytesAverageReady() { - return transferredBytesMovingAverageReference.isReady(); + return transferredBytesMovingAverageReference.get().isReady(); } public double getTransferredBytesAverage() { - return transferredBytesMovingAverageReference.getAverage(); + return transferredBytesMovingAverageReference.get().getAverage(); + } + + public void updateLastSuccessfulTransferInBytes(long size) { + lastSuccessfulTransferInBytes.set(size); } - // Visible for testing - public void updateLastSuccessfulTransferSize(long size) { - lastSuccessfulTransferInBytes = size; - this.transferredBytesMovingAverageReference.record(size); + public void updateSuccessfulTransferSize(long size) { + updateLastSuccessfulTransferInBytes(size); + this.transferredBytesMovingAverageReference.get().record(size); } public long getLastTransferTimestampMs() { - return lastTransferTimestampMs; + return lastTransferTimestampMs.get(); } - // Visible for testing public void updateLastTransferTimestampMs(long downloadTimestampInMs) { - this.lastTransferTimestampMs = downloadTimestampInMs; + this.lastTransferTimestampMs.set(downloadTimestampInMs); + } + + public void addTotalTransferTimeInMs(long totalTransferTimeInMs) { + this.totalTransferTimeInMs.addAndGet(totalTransferTimeInMs); + } + + public long getTotalTransferTimeInMs() { + 
return totalTransferTimeInMs.get(); } public DirectoryFileTransferTracker() { - transferredBytesMovingAverageReference = new MovingAverage(DIRECTORY_FILES_TRANSFER_DEFAULT_WINDOW_SIZE); - transferredBytesPerSecMovingAverageReference = new MovingAverage(DIRECTORY_FILES_TRANSFER_DEFAULT_WINDOW_SIZE); + transferredBytesMovingAverageReference = new AtomicReference<>(new MovingAverage(DIRECTORY_FILES_TRANSFER_DEFAULT_WINDOW_SIZE)); + transferredBytesPerSecMovingAverageReference = new AtomicReference<>( + new MovingAverage(DIRECTORY_FILES_TRANSFER_DEFAULT_WINDOW_SIZE) + ); } public DirectoryFileTransferTracker.Stats stats() { return new Stats( - transferredBytesStarted, - transferredBytesFailed, - transferredBytesSucceeded, - lastTransferTimestampMs, - transferredBytesMovingAverageReference.getAverage(), - lastSuccessfulTransferInBytes, - transferredBytesPerSecMovingAverageReference.getAverage() + transferredBytesStarted.get(), + transferredBytesFailed.get(), + transferredBytesSucceeded.get(), + lastTransferTimestampMs.get(), + totalTransferTimeInMs.get(), + transferredBytesMovingAverageReference.get().getAverage(), + lastSuccessfulTransferInBytes.get(), + transferredBytesPerSecMovingAverageReference.get().getAverage() ); } @@ -150,6 +173,7 @@ public static class Stats implements Writeable { public final long transferredBytesFailed; public final long transferredBytesSucceeded; public final long lastTransferTimestampMs; + public final long totalTransferTimeInMs; public final double transferredBytesMovingAverage; public final long lastSuccessfulTransferInBytes; public final double transferredBytesPerSecMovingAverage; @@ -159,6 +183,7 @@ public Stats( long transferredBytesFailed, long downloadBytesSucceeded, long lastTransferTimestampMs, + long totalTransferTimeInMs, double transferredBytesMovingAverage, long lastSuccessfulTransferInBytes, double transferredBytesPerSecMovingAverage @@ -167,6 +192,7 @@ public Stats( this.transferredBytesFailed = transferredBytesFailed; this.transferredBytesSucceeded = downloadBytesSucceeded; this.lastTransferTimestampMs = lastTransferTimestampMs; + this.totalTransferTimeInMs = totalTransferTimeInMs; this.transferredBytesMovingAverage = transferredBytesMovingAverage; this.lastSuccessfulTransferInBytes = lastSuccessfulTransferInBytes; this.transferredBytesPerSecMovingAverage = transferredBytesPerSecMovingAverage; @@ -177,6 +203,7 @@ public Stats(StreamInput in) throws IOException { this.transferredBytesFailed = in.readLong(); this.transferredBytesSucceeded = in.readLong(); this.lastTransferTimestampMs = in.readLong(); + this.totalTransferTimeInMs = in.readLong(); this.transferredBytesMovingAverage = in.readDouble(); this.lastSuccessfulTransferInBytes = in.readLong(); this.transferredBytesPerSecMovingAverage = in.readDouble(); @@ -188,6 +215,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(transferredBytesFailed); out.writeLong(transferredBytesSucceeded); out.writeLong(lastTransferTimestampMs); + out.writeLong(totalTransferTimeInMs); out.writeDouble(transferredBytesMovingAverage); out.writeLong(lastSuccessfulTransferInBytes); out.writeDouble(transferredBytesPerSecMovingAverage); @@ -203,6 +231,7 @@ public boolean equals(Object obj) { && transferredBytesFailed == stats.transferredBytesFailed && transferredBytesSucceeded == stats.transferredBytesSucceeded && lastTransferTimestampMs == stats.lastTransferTimestampMs + && totalTransferTimeInMs == stats.totalTransferTimeInMs && Double.compare(stats.transferredBytesMovingAverage, 
transferredBytesMovingAverage) == 0 && lastSuccessfulTransferInBytes == stats.lastSuccessfulTransferInBytes && Double.compare(stats.transferredBytesPerSecMovingAverage, transferredBytesPerSecMovingAverage) == 0; @@ -215,6 +244,7 @@ public int hashCode() { transferredBytesFailed, transferredBytesSucceeded, lastTransferTimestampMs, + totalTransferTimeInMs, transferredBytesMovingAverage, lastSuccessfulTransferInBytes, transferredBytesPerSecMovingAverage diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java index a5e02a5baed69..345583bbbd1be 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java @@ -8,15 +8,28 @@ package org.opensearch.index.store; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.Lock; +import org.opensearch.ExceptionsHelper; import org.opensearch.action.LatchedActionListener; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.exception.CorruptFileException; +import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; +import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream; +import org.opensearch.common.blobstore.transfer.stream.OffsetRangeInputStream; import org.opensearch.core.action.ActionListener; +import org.opensearch.index.store.exception.ChecksumCombinationException; import java.io.FileNotFoundException; import java.io.IOException; @@ -26,12 +39,14 @@ import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.UnaryOperator; import java.util.stream.Collectors; +import static org.opensearch.common.blobstore.transfer.RemoteTransferContainer.checksumOfChecksum; + /** * A {@code RemoteDirectory} provides an abstraction layer for storing a list of files to a remote store. * A remoteDirectory contains only files (no sub-folder hierarchy). 
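The succeeded-transfer path in the tracker hunk above derives a throughput sample before recording it: the elapsed time is clamped to at least 1 ms so the bytes-per-second division can never divide by zero, and the same clamped value feeds the new totalTransferTimeInMs counter. A minimal, self-contained sketch of that arithmetic (simplified names, not the actual tracker class):

```java
// Sketch of the rate bookkeeping in addTransferredBytesSucceeded above.
// The Math.max(1, ...) clamp guarantees a non-zero divisor even when the
// transfer completes within the same millisecond it started.
public class TransferRateSketch {
    public static void main(String[] args) {
        long size = 1_048_576L;                                // 1 MiB transferred
        long startTimeInMs = System.currentTimeMillis() - 250; // pretend the transfer took ~250 ms

        long currentTimeInMs = System.currentTimeMillis();
        long timeTakenInMs = Math.max(1, currentTimeInMs - startTimeInMs);
        long bytesPerSec = (size * 1_000L) / timeTakenInMs;    // roughly 4_194_304 bytes/sec here

        System.out.println(bytesPerSec + " bytes/sec over " + timeTakenInMs + " ms");
    }
}
```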
This class does not support all the methods in @@ -44,13 +59,33 @@ public class RemoteDirectory extends Directory { protected final BlobContainer blobContainer; + private static final Logger logger = LogManager.getLogger(RemoteDirectory.class); + + private final UnaryOperator<OffsetRangeInputStream> uploadRateLimiter; + + private final UnaryOperator<InputStream> downloadRateLimiter; + + /** + * Number of bytes in the segment file to store checksum + */ + private static final int SEGMENT_CHECKSUM_BYTES = 8; public BlobContainer getBlobContainer() { return blobContainer; } public RemoteDirectory(BlobContainer blobContainer) { + this(blobContainer, UnaryOperator.identity(), UnaryOperator.identity()); + } + + public RemoteDirectory( + BlobContainer blobContainer, + UnaryOperator<OffsetRangeInputStream> uploadRateLimiter, + UnaryOperator<InputStream> downloadRateLimiter + ) { this.blobContainer = blobContainer; + this.uploadRateLimiter = uploadRateLimiter; + this.downloadRateLimiter = downloadRateLimiter; } /** @@ -106,6 +141,17 @@ public void onFailure(Exception e) { } } + /** + * Returns the stream emitted by the blob object. Should be used within a closeable block (for example, try-with-resources) so the stream is always closed. + * + * @param fileName Name of the file + * @return Stream from the blob object + * @throws IOException if fetching the stream fails with an IO error + */ + public InputStream getBlobStream(String fileName) throws IOException { + return blobContainer.readBlob(fileName); + } + /** * Removes an existing file in the directory. * @@ -146,13 +192,24 @@ public IndexOutput createOutput(String name, IOContext context) { */ @Override public IndexInput openInput(String name, IOContext context) throws IOException { + return openInput(name, fileLength(name), context); + } + + public IndexInput openInput(String name, long fileLength, IOContext context) throws IOException { InputStream inputStream = null; try { inputStream = blobContainer.readBlob(name); - return new RemoteIndexInput(name, inputStream, fileLength(name)); + return new RemoteIndexInput(name, downloadRateLimiter.apply(inputStream), fileLength); } catch (Exception e) { // In case the RemoteIndexInput creation fails, close the input stream to avoid a file handle leak.
- if (inputStream != null) inputStream.close(); + if (inputStream != null) { + try { + inputStream.close(); + } catch (Exception closeEx) { + e.addSuppressed(closeEx); + } + } + logger.error("Exception while reading blob for file: " + name + " for path " + blobContainer.path()); throw e; } } @@ -176,9 +233,9 @@ public void close() throws IOException { @Override public long fileLength(String name) throws IOException { // ToDo: Instead of calling remote store each time, keep a cache with segment metadata - Map metadata = blobContainer.listBlobsByPrefix(name); - if (metadata.containsKey(name)) { - return metadata.get(name).length(); + List metadata = blobContainer.listBlobsByPrefixInSortedOrder(name, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + if (metadata.size() == 1 && metadata.get(0).name().equals(name)) { + return metadata.get(0).length(); } throw new NoSuchFileException(name); } @@ -259,4 +316,104 @@ public Lock obtainLock(String name) throws IOException { public void delete() throws IOException { blobContainer.delete(); } + + public boolean copyFrom( + Directory from, + String src, + String remoteFileName, + IOContext context, + Runnable postUploadRunner, + ActionListener listener + ) { + if (blobContainer instanceof AsyncMultiStreamBlobContainer) { + try { + uploadBlob(from, src, remoteFileName, context, postUploadRunner, listener); + } catch (Exception e) { + listener.onFailure(e); + } + return true; + } + return false; + } + + private void uploadBlob( + Directory from, + String src, + String remoteFileName, + IOContext ioContext, + Runnable postUploadRunner, + ActionListener listener + ) throws Exception { + long expectedChecksum = calculateChecksumOfChecksum(from, src); + long contentLength; + try (IndexInput indexInput = from.openInput(src, ioContext)) { + contentLength = indexInput.length(); + } + boolean remoteIntegrityEnabled = false; + if (getBlobContainer() instanceof AsyncMultiStreamBlobContainer) { + remoteIntegrityEnabled = ((AsyncMultiStreamBlobContainer) getBlobContainer()).remoteIntegrityCheckSupported(); + } + RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( + src, + remoteFileName, + contentLength, + true, + WritePriority.NORMAL, + (size, position) -> uploadRateLimiter.apply(new OffsetRangeIndexInputStream(from.openInput(src, ioContext), size, position)), + expectedChecksum, + remoteIntegrityEnabled + ); + ActionListener completionListener = ActionListener.wrap(resp -> { + try { + postUploadRunner.run(); + listener.onResponse(null); + } catch (Exception e) { + logger.error(() -> new ParameterizedMessage("Exception in segment postUpload for file [{}]", src), e); + listener.onFailure(e); + } + }, ex -> { + logger.error(() -> new ParameterizedMessage("Failed to upload blob {}", src), ex); + IOException corruptIndexException = ExceptionsHelper.unwrapCorruption(ex); + if (corruptIndexException != null) { + listener.onFailure(corruptIndexException); + return; + } + Throwable throwable = ExceptionsHelper.unwrap(ex, CorruptFileException.class); + if (throwable != null) { + CorruptFileException corruptFileException = (CorruptFileException) throwable; + listener.onFailure(new CorruptIndexException(corruptFileException.getMessage(), corruptFileException.getFileName())); + return; + } + listener.onFailure(ex); + }); + + completionListener = ActionListener.runBefore(completionListener, () -> { + try { + remoteTransferContainer.close(); + } catch (Exception e) { + logger.warn("Error occurred while closing streams", e); + } + }); + + 
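The new boolean-returning copyFrom above encodes a capability probe: it returns true only when the blob container supports async multi-stream uploads and the upload has been handed off with the listener, while false leaves the fallback to the caller. A hedged sketch of the expected calling pattern follows; the variable names are illustrative, the listener's `<Void>` type parameter is assumed from the `listener.onResponse(null)` usage (generics are stripped in this rendering), and the fallback mirrors what RemoteSegmentStoreDirectory adopts later in this diff:

```java
import java.io.IOException;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.opensearch.core.action.ActionListener;
import org.opensearch.index.store.RemoteDirectory;

class RemoteCopyCaller {
    // Returns control immediately when the async path is taken; otherwise
    // performs the plain Directory copy and completes the listener itself.
    static void upload(RemoteDirectory remote, Directory local, String src, String dest,
                       ActionListener<Void> listener) throws IOException {
        boolean scheduled = remote.copyFrom(local, src, dest, IOContext.DEFAULT,
            () -> { /* post-upload bookkeeping, e.g. tracking the uploaded file */ }, listener);
        if (scheduled == false) {
            // No async multi-stream support: synchronous fallback.
            remote.copyFrom(local, src, dest, IOContext.DEFAULT);
            listener.onResponse(null);
        }
    }
}
```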
WriteContext writeContext = remoteTransferContainer.createWriteContext(); + ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload(writeContext, completionListener); + } + + private long calculateChecksumOfChecksum(Directory directory, String file) throws IOException { + try (IndexInput indexInput = directory.openInput(file, IOContext.DEFAULT)) { + try { + return checksumOfChecksum(indexInput, SEGMENT_CHECKSUM_BYTES); + } catch (Exception e) { + throw new ChecksumCombinationException( + "Potentially corrupted file: Checksum combination failed while combining stored checksum " + + "and calculated checksum of stored checksum in segment file: " + + file + + ", directory: " + + directory, + file, + e + ); + } + } + } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 2ca01eb0fb931..988d52202f975 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfo; import org.apache.lucene.index.SegmentInfos; @@ -24,23 +23,18 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.Version; -import org.opensearch.ExceptionsHelper; import org.opensearch.common.UUIDs; -import org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer; -import org.opensearch.common.blobstore.exception.CorruptFileException; -import org.opensearch.common.blobstore.stream.write.WriteContext; -import org.opensearch.common.blobstore.stream.write.WritePriority; -import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; -import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.VersionedCodecStreamWrapper; +import org.opensearch.common.logging.Loggers; import org.opensearch.common.lucene.store.ByteArrayIndexInput; -import org.opensearch.common.util.ByteUtils; import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.remote.RemoteStoreUtils; -import org.opensearch.index.store.exception.ChecksumCombinationException; import org.opensearch.index.store.lockmanager.FileLockInfo; import org.opensearch.index.store.lockmanager.RemoteStoreCommitLevelLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; +import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; @@ -48,21 +42,21 @@ import java.io.FileNotFoundException; import java.io.IOException; +import java.io.InputStream; import java.nio.file.NoSuchFileException; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import 
java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; -import java.util.zip.CRC32; - -import com.jcraft.jzlib.JZlib; /** * A RemoteDirectory extension for remote segment store. We need to make sure we don't overwrite a segment file once uploaded. @@ -83,11 +77,6 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement */ public static final String SEGMENT_NAME_UUID_SEPARATOR = "__"; - /** - * Number of bytes in the segment file to store checksum - */ - private static final int SEGMENT_CHECKSUM_BYTES = 8; - /** * remoteDataDirectory is used to store segment files at path: cluster_UUID/index_UUID/shardId/segments/data */ @@ -114,7 +103,9 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement RemoteSegmentMetadata.METADATA_CODEC ); - private static final Logger logger = LogManager.getLogger(RemoteSegmentStoreDirectory.class); + private static final Logger staticLogger = LogManager.getLogger(RemoteSegmentStoreDirectory.class); + + private final Logger logger; /** * AtomicBoolean that ensures only one staleCommitDeletion activity is scheduled at a time. @@ -124,17 +115,21 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement private final AtomicLong metadataUploadCounter = new AtomicLong(0); + public static final int METADATA_FILES_TO_FETCH = 10; + public RemoteSegmentStoreDirectory( RemoteDirectory remoteDataDirectory, RemoteDirectory remoteMetadataDirectory, RemoteStoreLockManager mdLockManager, - ThreadPool threadPool + ThreadPool threadPool, + ShardId shardId ) throws IOException { super(remoteDataDirectory); this.remoteDataDirectory = remoteDataDirectory; this.remoteMetadataDirectory = remoteMetadataDirectory; this.mdLockManager = mdLockManager; this.threadPool = threadPool; + this.logger = Loggers.getLogger(getClass(), shardId); init(); } @@ -147,12 +142,14 @@ public RemoteSegmentStoreDirectory( * @throws IOException if there were any failures in reading the metadata file */ public RemoteSegmentMetadata init() throws IOException { + logger.debug("Start initialisation of remote segment metadata"); RemoteSegmentMetadata remoteSegmentMetadata = readLatestMetadataFile(); if (remoteSegmentMetadata != null) { this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(remoteSegmentMetadata.getMetadata()); } else { this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(); } + logger.debug("Initialisation of remote segment metadata completed"); return remoteSegmentMetadata; } @@ -164,8 +161,9 @@ public RemoteSegmentMetadata init() throws IOException { * * @throws IOException if there were any failures in reading the metadata file */ - public RemoteSegmentMetadata initializeToSpecificCommit(long primaryTerm, long commitGeneration) throws IOException { - String metadataFile = getMetadataFileForCommit(primaryTerm, commitGeneration); + public RemoteSegmentMetadata initializeToSpecificCommit(long primaryTerm, long commitGeneration, String acquirerId) throws IOException { + String metadataFilePrefix = MetadataFilenameUtils.getMetadataFilePrefixForCommit(primaryTerm, commitGeneration); + String metadataFile = ((RemoteStoreMetadataLockManager) mdLockManager).fetchLock(metadataFilePrefix, acquirerId); RemoteSegmentMetadata remoteSegmentMetadata = readMetadataFile(metadataFile); if (remoteSegmentMetadata != null) { this.segmentsUploadedToRemoteStore = new 
ConcurrentHashMap<>(remoteSegmentMetadata.getMetadata()); @@ -193,9 +191,11 @@ public RemoteSegmentMetadata readLatestMetadataFile() throws IOException { List metadataFiles = remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ); + RemoteStoreUtils.verifyNoMultipleWriters(metadataFiles, MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen); + if (metadataFiles.isEmpty() == false) { String latestMetadataFile = metadataFiles.get(0); logger.trace("Reading latest Metadata file {}", latestMetadataFile); @@ -208,9 +208,8 @@ public RemoteSegmentMetadata readLatestMetadataFile() throws IOException { } private RemoteSegmentMetadata readMetadataFile(String metadataFilename) throws IOException { - try (IndexInput indexInput = remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)) { - byte[] metadataBytes = new byte[(int) indexInput.length()]; - indexInput.readBytes(metadataBytes, 0, (int) indexInput.length()); + try (InputStream inputStream = remoteMetadataDirectory.getBlobStream(metadataFilename)) { + byte[] metadataBytes = inputStream.readAllBytes(); return metadataStreamWrapper.readStream(new ByteArrayIndexInput(metadataFilename, metadataBytes)); } } @@ -266,7 +265,7 @@ public static UploadedSegmentMetadata fromString(String uploadedFilename) { String[] values = uploadedFilename.split(SEPARATOR); UploadedSegmentMetadata metadata = new UploadedSegmentMetadata(values[0], values[1], values[2], Long.parseLong(values[3])); if (values.length < 5) { - logger.error("Lucene version is missing for UploadedSegmentMetadata: " + uploadedFilename); + staticLogger.error("Lucene version is missing for UploadedSegmentMetadata: " + uploadedFilename); } metadata.setWrittenByMajor(Integer.parseInt(values[4])); @@ -313,12 +312,13 @@ static String getMetadataFilePrefixForCommit(long primaryTerm, long generation) } // Visible for testing - static String getMetadataFilename( + public static String getMetadataFilename( long primaryTerm, long generation, long translogGeneration, long uploadCounter, - int metadataVersion + int metadataVersion, + String nodeId ) { return String.join( SEPARATOR, @@ -327,6 +327,7 @@ static String getMetadataFilename( RemoteStoreUtils.invertLong(generation), RemoteStoreUtils.invertLong(translogGeneration), RemoteStoreUtils.invertLong(uploadCounter), + String.valueOf(Objects.hash(nodeId)), RemoteStoreUtils.invertLong(System.currentTimeMillis()), String.valueOf(metadataVersion) ); @@ -341,6 +342,19 @@ static long getPrimaryTerm(String[] filenameTokens) { static long getGeneration(String[] filenameTokens) { return RemoteStoreUtils.invertLong(filenameTokens[2]); } + + public static Tuple getNodeIdByPrimaryTermAndGen(String filename) { + String[] tokens = filename.split(SEPARATOR); + if (tokens.length < 8) { + // For versions < 2.11, we don't have node id. 
+ return null; + } + String primaryTermAndGen = String.join(SEPARATOR, tokens[1], tokens[2], tokens[3]); + + String nodeId = tokens[5]; + return new Tuple<>(primaryTermAndGen, nodeId); + } + } /** @@ -414,8 +428,9 @@ public IndexOutput createOutput(String name, IOContext context) throws IOExcepti @Override public IndexInput openInput(String name, IOContext context) throws IOException { String remoteFilename = getExistingRemoteFilename(name); + long fileLength = fileLength(name); if (remoteFilename != null) { - return remoteDataDirectory.openInput(remoteFilename, context); + return remoteDataDirectory.openInput(remoteFilename, fileLength, context); } else { throw new NoSuchFileException(name); } @@ -433,77 +448,25 @@ public IndexInput openInput(String name, IOContext context) throws IOException { * @param listener Listener to handle upload callback events */ public void copyFrom(Directory from, String src, IOContext context, ActionListener listener) { - if (remoteDataDirectory.getBlobContainer() instanceof VerifyingMultiStreamBlobContainer) { - try { - String remoteFilename = getNewRemoteSegmentFilename(src); - uploadBlob(from, src, remoteFilename, context, listener); - } catch (Exception e) { - listener.onFailure(e); - } - } else { - try { + try { + final String remoteFileName = getNewRemoteSegmentFilename(src); + boolean uploaded = remoteDataDirectory.copyFrom(from, src, remoteFileName, context, () -> { + try { + postUpload(from, src, remoteFileName, getChecksumOfLocalFile(from, src)); + } catch (IOException e) { + throw new RuntimeException("Exception in segment postUpload for file " + src, e); + } + }, listener); + if (uploaded == false) { copyFrom(from, src, src, context); listener.onResponse(null); - } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("Exception while uploading file {} to the remote segment store", src), e); - listener.onFailure(e); } + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("Exception while uploading file {} to the remote segment store", src), e); + listener.onFailure(e); } } - private void uploadBlob(Directory from, String src, String remoteFileName, IOContext ioContext, ActionListener listener) - throws Exception { - long expectedChecksum = calculateChecksumOfChecksum(from, src); - long contentLength; - try (IndexInput indexInput = from.openInput(src, ioContext)) { - contentLength = indexInput.length(); - } - RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( - src, - remoteFileName, - contentLength, - true, - WritePriority.NORMAL, - (size, position) -> new OffsetRangeIndexInputStream(from.openInput(src, ioContext), size, position), - expectedChecksum, - remoteDataDirectory.getBlobContainer() instanceof VerifyingMultiStreamBlobContainer - ); - ActionListener completionListener = ActionListener.wrap(resp -> { - try { - postUpload(from, src, remoteFileName, getChecksumOfLocalFile(from, src)); - listener.onResponse(null); - } catch (Exception e) { - logger.error(() -> new ParameterizedMessage("Exception in segment postUpload for file [{}]", src), e); - listener.onFailure(e); - } - }, ex -> { - logger.error(() -> new ParameterizedMessage("Failed to upload blob {}", src), ex); - IOException corruptIndexException = ExceptionsHelper.unwrapCorruption(ex); - if (corruptIndexException != null) { - listener.onFailure(corruptIndexException); - return; - } - Throwable throwable = ExceptionsHelper.unwrap(ex, CorruptFileException.class); - if (throwable != null) { - CorruptFileException 
corruptFileException = (CorruptFileException) throwable; - listener.onFailure(new CorruptIndexException(corruptFileException.getMessage(), corruptFileException.getFileName())); - return; - } - listener.onFailure(ex); - }); - - completionListener = ActionListener.runBefore(completionListener, () -> { - try { - remoteTransferContainer.close(); - } catch (Exception e) { - logger.warn("Error occurred while closing streams", e); - } - }); - - WriteContext writeContext = remoteTransferContainer.createWriteContext(); - ((VerifyingMultiStreamBlobContainer) remoteDataDirectory.getBlobContainer()).asyncBlobUpload(writeContext, completionListener); - } - /** * This acquires a lock on a given commit by creating a lock file in lock directory using {@code FileLockInfo} * @@ -579,13 +542,6 @@ String getMetadataFileForCommit(long primaryTerm, long generation) throws IOExce return metadataFiles.get(0); } - public void copyFrom(Directory from, String src, String dest, IOContext context, String checksum) throws IOException { - String remoteFilename; - remoteFilename = getNewRemoteSegmentFilename(dest); - remoteDataDirectory.copyFrom(from, src, remoteFilename, context); - postUpload(from, src, remoteFilename, checksum); - } - private void postUpload(Directory from, String src, String remoteFilename, String checksum) throws IOException { UploadedSegmentMetadata segmentMetadata = new UploadedSegmentMetadata(src, remoteFilename, checksum, from.fileLength(src)); segmentsUploadedToRemoteStore.put(src, segmentMetadata); @@ -597,7 +553,9 @@ private void postUpload(Directory from, String src, String remoteFilename, Strin */ @Override public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { - copyFrom(from, src, dest, context, getChecksumOfLocalFile(from, src)); + String remoteFilename = getNewRemoteSegmentFilename(dest); + remoteDataDirectory.copyFrom(from, src, remoteFilename, context); + postUpload(from, src, remoteFilename, getChecksumOfLocalFile(from, src)); } /** @@ -622,6 +580,7 @@ public boolean containsFile(String localFilename, String checksum) { * @param storeDirectory instance of local directory to temporarily create metadata file before upload * @param translogGeneration translog generation * @param replicationCheckpoint ReplicationCheckpoint of primary shard + * @param nodeId node id * @throws IOException in case of I/O error while uploading the metadata file */ public void uploadMetadata( @@ -629,7 +588,8 @@ public void uploadMetadata( SegmentInfos segmentInfosSnapshot, Directory storeDirectory, long translogGeneration, - ReplicationCheckpoint replicationCheckpoint + ReplicationCheckpoint replicationCheckpoint, + String nodeId ) throws IOException { synchronized (this) { String metadataFilename = MetadataFilenameUtils.getMetadataFilename( @@ -637,7 +597,8 @@ public void uploadMetadata( segmentInfosSnapshot.getGeneration(), translogGeneration, metadataUploadCounter.incrementAndGet(), - RemoteSegmentMetadata.CURRENT_VERSION + RemoteSegmentMetadata.CURRENT_VERSION, + nodeId ); try { try (IndexOutput indexOutput = storeDirectory.createOutput(metadataFilename, IOContext.DEFAULT)) { @@ -716,7 +677,7 @@ private Map getSegmentToLuceneVersion(Collection segmen */ private void tryAndDeleteLocalFile(String filename, Directory directory) { try { - logger.trace("Deleting file: " + filename); + logger.debug("Deleting file: " + filename); directory.deleteFile(filename); } catch (NoSuchFileException | FileNotFoundException e) { logger.trace("Exception while deleting. 
Missing file : " + filename, e); @@ -731,27 +692,6 @@ private String getChecksumOfLocalFile(Directory directory, String file) throws I { } } - private long calculateChecksumOfChecksum(Directory directory, String file) throws IOException { - try (IndexInput indexInput = directory.openInput(file, IOContext.DEFAULT)) { - long storedChecksum = CodecUtil.retrieveChecksum(indexInput); - CRC32 checksumOfChecksum = new CRC32(); - checksumOfChecksum.update(ByteUtils.toByteArrayBE(storedChecksum)); - try { - return JZlib.crc32_combine(storedChecksum, checksumOfChecksum.getValue(), SEGMENT_CHECKSUM_BYTES); - } catch (Exception e) { - throw new ChecksumCombinationException( - "Potentially corrupted file: Checksum combination failed while combining stored checksum " - + "and calculated checksum of stored checksum in segment file: " - + file - + ", directory: " - + directory, - file, - e - ); - } - } - } - private String getExistingRemoteFilename(String localFilename) { if (segmentsUploadedToRemoteStore.containsKey(localFilename)) { return segmentsUploadedToRemoteStore.get(localFilename).uploadedFilename; @@ -787,7 +727,7 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException Integer.MAX_VALUE ); if (sortedMetadataFileList.size() <= lastNMetadataFilesToKeep) { - logger.trace( + logger.debug( "Number of commits in remote segment store={}, lastNMetadataFilesToKeep={}", sortedMetadataFileList.size(), lastNMetadataFilesToKeep @@ -815,6 +755,11 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException }).collect(Collectors.toList()); sortedMetadataFileList.removeAll(metadataFilesToBeDeleted); + logger.debug( + "metadataFilesEligibleToDelete={} metadataFilesToBeDeleted={}", + metadataFilesEligibleToDelete, + metadataFilesToBeDeleted + ); Map<String, UploadedSegmentMetadata> activeSegmentFilesMetadataMap = new HashMap<>(); Set<String> activeSegmentRemoteFilenames = new HashSet<>(); @@ -832,9 +777,11 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException .map(metadata -> metadata.uploadedFilename) .collect(Collectors.toSet()); AtomicBoolean deletionSuccessful = new AtomicBoolean(true); + List<String> nonActiveDeletedSegmentFiles = new ArrayList<>(); staleSegmentRemoteFilenames.stream().filter(file -> !activeSegmentRemoteFilenames.contains(file)).forEach(file -> { try { remoteDataDirectory.deleteFile(file); + nonActiveDeletedSegmentFiles.add(file); if (!activeSegmentFilesMetadataMap.containsKey(getLocalSegmentFilename(file))) { segmentsUploadedToRemoteStore.remove(getLocalSegmentFilename(file)); } @@ -849,37 +796,45 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException ); } }); + logger.debug("nonActiveDeletedSegmentFiles={}", nonActiveDeletedSegmentFiles); if (deletionSuccessful.get()) { - logger.trace("Deleting stale metadata file {} from remote segment store", metadataFile); + logger.debug("Deleting stale metadata file {} from remote segment store", metadataFile); remoteMetadataDirectory.deleteFile(metadataFile); } } } + public void deleteStaleSegmentsAsync(int lastNMetadataFilesToKeep) { + deleteStaleSegmentsAsync(lastNMetadataFilesToKeep, ActionListener.wrap(r -> {}, e -> {})); + } + /** * Delete stale segment and metadata files asynchronously. * This method calls {@link RemoteSegmentStoreDirectory#deleteStaleSegments(int)} in an async manner.
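The async deletion added here is gated so that at most one stale-commit cleanup is in flight per directory, and the gate is re-opened both in the task's finally block and when scheduling itself fails. Distilled into a self-contained sketch using plain JDK types (illustrative names, not the actual class):

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicBoolean;

// Distills the canDeleteStaleCommits.compareAndSet(...) gate used by
// deleteStaleSegmentsAsync: one in-flight cleanup at a time, and the gate
// re-opens whether the cleanup succeeds, fails, or never gets scheduled.
class SingleFlightCleanup {
    private final AtomicBoolean canRun = new AtomicBoolean(true);

    void schedule(ExecutorService executor, Runnable cleanup) {
        if (canRun.compareAndSet(true, false)) {
            try {
                executor.execute(() -> {
                    try {
                        cleanup.run();
                    } finally {
                        canRun.set(true); // re-open the gate for the next commit
                    }
                });
            } catch (Exception e) {
                canRun.set(true); // rejected execution etc.: do not leave the gate shut
            }
        }
    }
}
```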
* * @param lastNMetadataFilesToKeep number of metadata files to keep */ - public void deleteStaleSegmentsAsync(int lastNMetadataFilesToKeep) { + public void deleteStaleSegmentsAsync(int lastNMetadataFilesToKeep, ActionListener listener) { if (canDeleteStaleCommits.compareAndSet(true, false)) { try { threadPool.executor(ThreadPool.Names.REMOTE_PURGE).execute(() -> { try { deleteStaleSegments(lastNMetadataFilesToKeep); + listener.onResponse(null); } catch (Exception e) { - logger.info( + logger.error( "Exception while deleting stale commits from remote segment store, will retry delete post next commit", e ); + listener.onFailure(e); } finally { canDeleteStaleCommits.set(true); } }); } catch (Exception e) { - logger.info("Exception occurred while scheduling deleteStaleCommits", e); + logger.error("Exception occurred while scheduling deleteStaleCommits", e); canDeleteStaleCommits.set(true); + listener.onFailure(e); } } } @@ -894,7 +849,7 @@ private boolean deleteIfEmpty() throws IOException { 1 ); if (metadataFiles.size() != 0) { - logger.info("Remote directory still has files , not deleting the path"); + logger.info("Remote directory still has files, not deleting the path"); return false; } @@ -910,8 +865,8 @@ private boolean deleteIfEmpty() throws IOException { return true; } + @Override public void close() throws IOException { - deleteStaleSegmentsAsync(0); - deleteIfEmpty(); + deleteStaleSegmentsAsync(0, ActionListener.wrap(r -> deleteIfEmpty(), e -> logger.error("Failed to cleanup remote directory"))); } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index 3bec84f287ce4..a5e89ec6a8327 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -9,12 +9,12 @@ package org.opensearch.index.store; import org.apache.lucene.store.Directory; -import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; -import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -46,35 +46,34 @@ public RemoteSegmentStoreDirectoryFactory(Supplier reposito public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throws IOException { String repositoryName = indexSettings.getRemoteStoreRepository(); String indexUUID = indexSettings.getIndex().getUUID(); - String shardId = String.valueOf(path.getShardId().getId()); - - return newDirectory(repositoryName, indexUUID, shardId); + return newDirectory(repositoryName, indexUUID, path.getShardId()); } - public Directory newDirectory(String repositoryName, String indexUUID, String shardId) throws IOException { + public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId) throws IOException { try (Repository repository = repositoriesService.get().repository(repositoryName)) { assert repository instanceof BlobStoreRepository : "repository should be instance of 
BlobStoreRepository"; - BlobPath commonBlobPath = ((BlobStoreRepository) repository).basePath(); - commonBlobPath = commonBlobPath.add(indexUUID).add(shardId).add(SEGMENTS); + BlobStoreRepository blobStoreRepository = ((BlobStoreRepository) repository); + BlobPath commonBlobPath = blobStoreRepository.basePath(); + commonBlobPath = commonBlobPath.add(indexUUID).add(String.valueOf(shardId.id())).add(SEGMENTS); - RemoteDirectory dataDirectory = createRemoteDirectory(repository, commonBlobPath, "data"); - RemoteDirectory metadataDirectory = createRemoteDirectory(repository, commonBlobPath, "metadata"); - RemoteStoreMetadataLockManager mdLockManager = RemoteStoreLockManagerFactory.newLockManager( + RemoteDirectory dataDirectory = new RemoteDirectory( + blobStoreRepository.blobStore().blobContainer(commonBlobPath.add("data")), + blobStoreRepository::maybeRateLimitRemoteUploadTransfers, + blobStoreRepository::maybeRateLimitRemoteDownloadTransfers + ); + RemoteDirectory metadataDirectory = new RemoteDirectory( + blobStoreRepository.blobStore().blobContainer(commonBlobPath.add("metadata")) + ); + RemoteStoreLockManager mdLockManager = RemoteStoreLockManagerFactory.newLockManager( repositoriesService.get(), repositoryName, indexUUID, - shardId + String.valueOf(shardId.id()) ); - return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool); + return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool, shardId); } catch (RepositoryMissingException e) { throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", e); } } - - private RemoteDirectory createRemoteDirectory(Repository repository, BlobPath commonBlobPath, String extention) { - BlobPath extendedPath = commonBlobPath.add(extention); - BlobContainer dataBlobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(extendedPath); - return new RemoteDirectory(dataBlobContainer); - } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java b/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java new file mode 100644 index 0000000000000..727c57afd289b --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java @@ -0,0 +1,165 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.common.logging.Loggers; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.Collection; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ExecutionException; + +/** + * Helper class to download files from a {@link RemoteSegmentStoreDirectory} + * instance to a local {@link Directory} instance in parallel depending on thread + * pool size and recovery settings. + */ +@InternalApi +public final class RemoteStoreFileDownloader { + private final Logger logger; + private final ThreadPool threadPool; + private final RecoverySettings recoverySettings; + + public RemoteStoreFileDownloader(ShardId shardId, ThreadPool threadPool, RecoverySettings recoverySettings) { + this.logger = Loggers.getLogger(RemoteStoreFileDownloader.class, shardId); + this.threadPool = threadPool; + this.recoverySettings = recoverySettings; + } + + /** + * Copies the given segments from the remote segment store to the given + * local directory. + * @param source The remote directory to copy segment files from + * @param destination The local directory to copy segment files to + * @param toDownloadSegments The list of segment files to download + * @param listener Callback listener to be notified upon completion + */ + public void downloadAsync( + CancellableThreads cancellableThreads, + Directory source, + Directory destination, + Collection<String> toDownloadSegments, + ActionListener<Void> listener + ) { + downloadInternal(cancellableThreads, source, destination, null, toDownloadSegments, () -> {}, listener); + } + + /** + * Copies the given segments from the remote segment store to the given + * local directory, while also copying the segments _to_ another remote directory. + * @param source The remote directory to copy segment files from + * @param destination The local directory to copy segment files to + * @param secondDestination The second remote directory that segment files are + * copied to after being copied to the local directory + * @param toDownloadSegments The list of segment files to download + * @param onFileCompletion A generic runnable that is invoked after each file download. + * Must be thread safe as this may be invoked concurrently from + * different threads.
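The download scheduling shown in the rest of this file caps parallelism at the minimum of the file count, the REMOTE_RECOVERY pool size, and the indices.recovery.max_concurrent_remote_store_streams setting, then has each worker pull from a shared queue until it drains. A self-contained sketch of that pattern, substituting a plain JDK executor for the thread pool (all names illustrative):

```java
import java.util.Collection;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.function.Consumer;

// Condensed sketch of the scheduling below: start min(files, poolMax,
// maxStreams) workers; each polls a shared queue and re-submits itself
// until the queue is drained.
class ParallelDrainer {
    static void drain(ExecutorService executor, int poolMax, int maxStreams,
                      Collection<String> files, Consumer<String> download) {
        Queue<String> queue = new ConcurrentLinkedQueue<>(files);
        int workers = Math.min(files.size(), Math.min(poolMax, maxStreams));
        for (int i = 0; i < workers; i++) {
            next(executor, queue, download);
        }
    }

    private static void next(ExecutorService executor, Queue<String> queue, Consumer<String> download) {
        final String file = queue.poll();
        if (file == null) {
            return; // queue drained; the real class completes its listener here
        }
        executor.submit(() -> {
            download.accept(file);
            next(executor, queue, download); // pick up the next file, if any
        });
    }
}
```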
+ */ + public void download( + Directory source, + Directory destination, + Directory secondDestination, + Collection toDownloadSegments, + Runnable onFileCompletion + ) throws InterruptedException, IOException { + final CancellableThreads cancellableThreads = new CancellableThreads(); + final PlainActionFuture listener = PlainActionFuture.newFuture(); + downloadInternal(cancellableThreads, source, destination, secondDestination, toDownloadSegments, onFileCompletion, listener); + try { + listener.get(); + } catch (ExecutionException e) { + if (e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } else if (e.getCause() instanceof IOException) { + throw (IOException) e.getCause(); + } + throw new RuntimeException(e); + } catch (InterruptedException e) { + // If the blocking call on the PlainActionFuture itself is interrupted, then we must + // cancel the asynchronous work we were waiting on + cancellableThreads.cancel(e.getMessage()); + Thread.currentThread().interrupt(); + throw e; + } + } + + private void downloadInternal( + CancellableThreads cancellableThreads, + Directory source, + Directory destination, + @Nullable Directory secondDestination, + Collection toDownloadSegments, + Runnable onFileCompletion, + ActionListener listener + ) { + final Queue queue = new ConcurrentLinkedQueue<>(toDownloadSegments); + // Choose the minimum of: + // - number of files to download + // - max thread pool size + // - "indices.recovery.max_concurrent_remote_store_streams" setting + final int threads = Math.min( + toDownloadSegments.size(), + Math.min(threadPool.info(ThreadPool.Names.REMOTE_RECOVERY).getMax(), recoverySettings.getMaxConcurrentRemoteStoreStreams()) + ); + logger.trace("Starting download of {} files with {} threads", queue.size(), threads); + final ActionListener allFilesListener = new GroupedActionListener<>(ActionListener.map(listener, r -> null), threads); + for (int i = 0; i < threads; i++) { + copyOneFile(cancellableThreads, source, destination, secondDestination, queue, onFileCompletion, allFilesListener); + } + } + + private void copyOneFile( + CancellableThreads cancellableThreads, + Directory source, + Directory destination, + @Nullable Directory secondDestination, + Queue queue, + Runnable onFileCompletion, + ActionListener listener + ) { + final String file = queue.poll(); + if (file == null) { + // Queue is empty, so notify listener we are done + listener.onResponse(null); + } else { + threadPool.executor(ThreadPool.Names.REMOTE_RECOVERY).submit(() -> { + logger.trace("Downloading file {}", file); + try { + cancellableThreads.executeIO(() -> { + destination.copyFrom(source, file, file, IOContext.DEFAULT); + onFileCompletion.run(); + if (secondDestination != null) { + secondDestination.copyFrom(destination, file, file, IOContext.DEFAULT); + } + }); + } catch (Exception e) { + // Clear the queue to stop any future processing, report the failure, then return + queue.clear(); + listener.onFailure(e); + return; + } + copyOneFile(cancellableThreads, source, destination, secondDestination, queue, onFileCompletion, listener); + }); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 4f51994a6ac2f..b822742de6e97 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -66,6 +66,7 @@ import org.apache.lucene.util.Version; import org.opensearch.ExceptionsHelper; import 
org.opensearch.common.UUIDs; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.logging.Loggers; import org.opensearch.common.lucene.Lucene; @@ -92,6 +93,7 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.AbstractIndexShardComponent; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardPath; import org.opensearch.index.translog.Translog; import java.io.Closeable; @@ -179,6 +181,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref private final ReentrantReadWriteLock metadataLock = new ReentrantReadWriteLock(); private final ShardLock shardLock; private final OnClose onClose; + private final ShardPath shardPath; // used to ref count files when a new Reader is opened for PIT/Scroll queries // prevents segment files deletion until the PIT/Scroll expires or is discarded @@ -192,10 +195,17 @@ protected void closeInternal() { }; public Store(ShardId shardId, IndexSettings indexSettings, Directory directory, ShardLock shardLock) { - this(shardId, indexSettings, directory, shardLock, OnClose.EMPTY); + this(shardId, indexSettings, directory, shardLock, OnClose.EMPTY, null); } - public Store(ShardId shardId, IndexSettings indexSettings, Directory directory, ShardLock shardLock, OnClose onClose) { + public Store( + ShardId shardId, + IndexSettings indexSettings, + Directory directory, + ShardLock shardLock, + OnClose onClose, + ShardPath shardPath + ) { super(shardId, indexSettings); final TimeValue refreshInterval = indexSettings.getValue(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING); logger.debug("store stats are refreshed with refresh_interval [{}]", refreshInterval); @@ -203,6 +213,7 @@ public Store(ShardId shardId, IndexSettings indexSettings, Directory directory, this.directory = new StoreDirectory(sizeCachingDir, Loggers.getLogger("index.store.deletes", shardId)); this.shardLock = shardLock; this.onClose = onClose; + this.shardPath = shardPath; assert onClose != null; assert shardLock != null; assert shardLock.getShardId().equals(shardId); @@ -213,6 +224,11 @@ public Directory directory() { return directory; } + @InternalApi + public ShardPath shardPath() { + return shardPath; + } + /** * Returns the last committed segments info for this store * @@ -283,14 +299,15 @@ final void ensureOpen() { /** * Returns a new MetadataSnapshot for the given commit. If the given commit is null * the latest commit point is used. - * + *
<p>
              * Note that this method requires the caller verify it has the right to access the store and * no concurrent file changes are happening. If in doubt, you probably want to use one of the following: - * + *
<p>
              * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, Logger)} to read a meta data while locking * {@link IndexShard#snapshotStoreMetadata()} to safely read from an existing shard * {@link IndexShard#acquireLastIndexCommit(boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed - * @param commit the index commit to read the snapshot from or null if the latest snapshot should be read from the + * + * @param commit the index commit to read the snapshot from or {@code null} if the latest snapshot should be read from the * directory * @throws CorruptIndexException if the lucene index is corrupted. This can be caused by a checksum mismatch or an * unexpected exception when opening the index reading the segments file. @@ -314,10 +331,10 @@ public MetadataSnapshot getMetadata() throws IOException { /** * Returns a new MetadataSnapshot for the given commit. If the given commit is null * the latest commit point is used. - * + *
<p>
              * Note that this method requires the caller verify it has the right to access the store and * no concurrent file changes are happening. If in doubt, you probably want to use one of the following: - * + *
<p>
              * {@link #readMetadataSnapshot(Path, ShardId, NodeEnvironment.ShardLocker, Logger)} to read a meta data while locking * {@link IndexShard#snapshotStoreMetadata()} to safely read from an existing shard * {@link IndexShard#acquireLastIndexCommit(boolean)} to get an {@link IndexCommit} which is safe to use but has to be freed @@ -369,7 +386,13 @@ public MetadataSnapshot getMetadata(SegmentInfos segmentInfos) throws IOExceptio */ public Map getSegmentMetadataMap(SegmentInfos segmentInfos) throws IOException { assert indexSettings.isSegRepEnabled(); - return loadMetadata(segmentInfos, directory, logger, true).fileMetadata; + failIfCorrupted(); + try { + return loadMetadata(segmentInfos, directory, logger, true).fileMetadata; + } catch (NoSuchFileException | CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) { + markStoreCorrupted(ex); + throw ex; + } } /** @@ -784,7 +807,7 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetadata) thr /** * Segment replication method - * + *
<p>
              * This method takes the segment info bytes to build SegmentInfos. It inc'refs files pointed by passed in SegmentInfos * bytes to ensure they are not deleted. * @@ -859,7 +882,7 @@ public void beforeClose() { * have the ability to create a writer directly from a SegmentInfos object. To promote the replica as a primary and avoid reindexing, we must first commit * on the replica so that it can be opened with a writeable engine. Further, InternalEngine currently invokes `trimUnsafeCommits` which reverts the engine to a previous safeCommit where the max seqNo is less than or equal * to the current global checkpoint. It is likely that the replica has a maxSeqNo that is higher than the global cp and a new commit will be wiped. - * + *
<p>
              * To get around these limitations, this method first creates an IndexCommit directly from SegmentInfos, it then * uses an appending IW to create an IndexCommit from the commit created on SegmentInfos. * This ensures that 1. All files in the new commit are fsynced and 2. Deletes older commit points so the only commit to start from is our new commit. @@ -950,14 +973,14 @@ public void copyFrom(Directory from, String src, String dest, IOContext context) long fileSize = from.fileLength(src); beforeDownload(fileSize); boolean success = false; + long startTime = System.currentTimeMillis(); try { - long startTime = System.currentTimeMillis(); super.copyFrom(from, src, dest, context); success = true; afterDownload(fileSize, startTime); } finally { if (!success) { - downloadFailed(fileSize); + downloadFailed(fileSize, startTime); } } } @@ -983,8 +1006,8 @@ private void afterDownload(long fileSize, long startTimeInMs) { /** * Updates the amount of bytes failed in download */ - private void downloadFailed(long fileSize) { - directoryFileTransferTracker.addTransferredBytesFailed(fileSize); + private void downloadFailed(long fileSize, long startTimeInMs) { + directoryFileTransferTracker.addTransferredBytesFailed(fileSize, startTimeInMs); } } diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/FileLockInfo.java b/server/src/main/java/org/opensearch/index/store/lockmanager/FileLockInfo.java index 24f42743e1a04..b6be60c489a6c 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/FileLockInfo.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/FileLockInfo.java @@ -21,6 +21,7 @@ public class FileLockInfo implements LockInfo { private String fileToLock; private String acquirerId; + private static final int INVALID_INDEX = -1; public String getAcquirerId() { return acquirerId; @@ -88,21 +89,34 @@ static String generateLockName(String fileToLock, String acquirerId) { } public static String getFileToLockNameFromLock(String lockName) { - String[] lockNameTokens = lockName.split(RemoteStoreLockManagerUtils.SEPARATOR); - - if (lockNameTokens.length != 2) { - throw new IllegalArgumentException("Provided Lock Name " + lockName + " is not Valid."); + // use proper separator for the lock file depending on the version it is created + String lockSeparator = lockName.endsWith(RemoteStoreLockManagerUtils.PRE_OS210_LOCK_FILE_EXTENSION) + ? 
RemoteStoreLockManagerUtils.PRE_OS210_LOCK_SEPARATOR + : RemoteStoreLockManagerUtils.SEPARATOR; + final int indexOfSeparator = lockName.lastIndexOf(lockSeparator); + if (indexOfSeparator == INVALID_INDEX) { + throw new IllegalArgumentException("Provided lock name: " + lockName + " is invalid with separator: " + lockSeparator); } - return lockNameTokens[0]; + return lockName.substring(0, indexOfSeparator); } public static String getAcquirerIdFromLock(String lockName) { - String[] lockNameTokens = lockName.split(RemoteStoreLockManagerUtils.SEPARATOR); + String lockExtension = RemoteStoreLockManagerUtils.LOCK_FILE_EXTENSION; + String lockSeparator = RemoteStoreLockManagerUtils.SEPARATOR; - if (lockNameTokens.length != 2) { - throw new IllegalArgumentException("Provided Lock Name " + lockName + " is not Valid."); + // check if lock file is created on version <=2.10 + if (lockName.endsWith(RemoteStoreLockManagerUtils.PRE_OS210_LOCK_FILE_EXTENSION)) { + lockSeparator = RemoteStoreLockManagerUtils.PRE_OS210_LOCK_SEPARATOR; + lockExtension = RemoteStoreLockManagerUtils.PRE_OS210_LOCK_FILE_EXTENSION; + } + final int indexOfSeparator = lockName.lastIndexOf(lockSeparator); + final int indexOfExt = lockName.lastIndexOf(lockExtension); + if (indexOfSeparator == INVALID_INDEX || indexOfExt == INVALID_INDEX) { + throw new IllegalArgumentException( + "Provided lock name: " + lockName + " is invalid with separator: " + lockSeparator + " and extension: " + lockExtension + ); } - return lockNameTokens[1].replace(RemoteStoreLockManagerUtils.LOCK_FILE_EXTENSION, ""); + return lockName.substring(indexOfSeparator + lockSeparator.length(), indexOfExt); } } diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java index e866551eae143..1a306f3261094 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java @@ -33,7 +33,7 @@ public RemoteStoreLockManagerFactory(Supplier repositoriesS this.repositoriesService = repositoriesService; } - public RemoteStoreMetadataLockManager newLockManager(String repositoryName, String indexUUID, String shardId) throws IOException { + public RemoteStoreLockManager newLockManager(String repositoryName, String indexUUID, String shardId) throws IOException { return newLockManager(repositoriesService.get(), repositoryName, indexUUID, shardId); } @@ -58,6 +58,12 @@ public static RemoteStoreMetadataLockManager newLockManager( } } + // TODO: remove this once we add poller in place to trigger remote store cleanup + // see: https://github.com/opensearch-project/OpenSearch/issues/8469 + public Supplier getRepositoriesService() { + return repositoriesService; + } + private static RemoteBufferedOutputDirectory createRemoteBufferedOutputDirectory( Repository repository, BlobPath commonBlobPath, diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerUtils.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerUtils.java index 452dfc329d88b..d5fb2722a64dc 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerUtils.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerUtils.java @@ -15,8 +15,11 @@ */ public class RemoteStoreLockManagerUtils { static final 
String FILE_TO_LOCK_NAME = "file_to_lock"; - static final String SEPARATOR = "___"; - static final String LOCK_FILE_EXTENSION = ".lock"; + static final String PRE_OS210_LOCK_SEPARATOR = "___"; + static final String SEPARATOR = "..."; + // for versions <= 2.10, we have lock files with this extension. + static final String PRE_OS210_LOCK_FILE_EXTENSION = ".lock"; + static final String LOCK_FILE_EXTENSION = ".v2_lock"; static final String ACQUIRER_ID = "acquirer_id"; public static final String NO_TTL = "-1"; static final String LOCK_EXPIRY_TIME = "lock_expiry_time"; diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java index fd7906729e314..756905d02229a 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java @@ -14,10 +14,13 @@ import org.apache.lucene.store.IndexOutput; import org.opensearch.index.store.RemoteBufferedOutputDirectory; +import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.NoSuchFileException; import java.util.Collection; +import java.util.List; import java.util.Objects; +import java.util.stream.Collectors; /** * A Class that implements Remote Store Lock Manager by creating lock files for the remote store files that needs to @@ -70,6 +73,19 @@ public void release(LockInfo lockInfo) throws IOException { } } + public String fetchLock(String filenamePrefix, String acquirerId) throws IOException { + Collection lockFiles = lockDirectory.listFilesByPrefix(filenamePrefix); + List lockFilesForAcquirer = lockFiles.stream() + .filter(lockFile -> acquirerId.equals(FileLockInfo.LockFileUtils.getAcquirerIdFromLock(lockFile))) + .map(FileLockInfo.LockFileUtils::getFileToLockNameFromLock) + .collect(Collectors.toList()); + if (lockFilesForAcquirer.size() == 0) { + throw new FileNotFoundException("No lock file found for prefix: " + filenamePrefix + " and acquirerId: " + acquirerId); + } + assert lockFilesForAcquirer.size() == 1; + return lockFilesForAcquirer.get(0); + } + /** * Checks whether a given file have any lock on it or not. * @param lockInfo File Lock Info instance for which we need to check if lock is acquired. diff --git a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java index 7319a5324777a..6fd198747570f 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java +++ b/server/src/main/java/org/opensearch/index/store/remote/file/OnDemandBlockIndexInput.java @@ -27,7 +27,7 @@ *
* This class delegates the responsibility of actually fetching the block when demanded to its subclasses using * {@link OnDemandBlockIndexInput#fetchBlock(int)}. - * + *
<p>
              * Like {@link IndexInput}, this class may only be used from one thread as it is not thread safe. * However, a cleaning action may run from another thread triggered by the {@link Cleaner}, but * this is okay because at that point the {@link OnDemandBlockIndexInput} instance is phantom @@ -428,10 +428,10 @@ Builder blockSizeShift(int blockSizeShift) { * instance to hold the current underlying IndexInput, while allowing it to * be changed out with different instances as {@link OnDemandBlockIndexInput} * reads through the data. - * + *
* <p>
              * This class implements {@link Runnable} so that it can be passed directly * to the cleaner to run its close action. - * + *
* <p>
              * [1]: https://github.com/apache/lucene/blob/8340b01c3cc229f33584ce2178b07b8984daa6a9/lucene/core/src/java/org/apache/lucene/store/IndexInput.java#L32-L33 */ private static class BlockHolder implements Closeable, Runnable { diff --git a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java index 207620573886d..1cec20ec3f6cc 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java +++ b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java @@ -10,13 +10,16 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.util.Version; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import java.io.IOException; import java.util.Map; +import java.util.function.Function; import java.util.stream.Collectors; /** @@ -110,11 +113,13 @@ public void write(IndexOutput out) throws IOException { public static RemoteSegmentMetadata read(IndexInput indexInput) throws IOException { Map metadata = indexInput.readMapOfStrings(); - ReplicationCheckpoint replicationCheckpoint = readCheckpointFromIndexInput(indexInput); + final Map uploadedSegmentMetadataMap = RemoteSegmentMetadata + .fromMapOfStrings(metadata); + ReplicationCheckpoint replicationCheckpoint = readCheckpointFromIndexInput(indexInput, uploadedSegmentMetadataMap); int byteArraySize = (int) indexInput.readLong(); byte[] segmentInfosBytes = new byte[byteArraySize]; indexInput.readBytes(segmentInfosBytes, 0, byteArraySize); - return new RemoteSegmentMetadata(RemoteSegmentMetadata.fromMapOfStrings(metadata), segmentInfosBytes, replicationCheckpoint); + return new RemoteSegmentMetadata(uploadedSegmentMetadataMap, segmentInfosBytes, replicationCheckpoint); } public static void writeCheckpointToIndexOutput(ReplicationCheckpoint replicationCheckpoint, IndexOutput out) throws IOException { @@ -131,14 +136,30 @@ public static void writeCheckpointToIndexOutput(ReplicationCheckpoint replicatio out.writeString(replicationCheckpoint.getCodec()); } - private static ReplicationCheckpoint readCheckpointFromIndexInput(IndexInput in) throws IOException { + private static ReplicationCheckpoint readCheckpointFromIndexInput( + IndexInput in, + Map uploadedSegmentMetadataMap + ) throws IOException { return new ReplicationCheckpoint( new ShardId(new Index(in.readString(), in.readString()), in.readVInt()), in.readLong(), in.readLong(), in.readLong(), in.readLong(), - in.readString() + in.readString(), + toStoreFileMetadata(uploadedSegmentMetadataMap) ); } + + private static Map toStoreFileMetadata( + Map metadata + ) { + return metadata.entrySet() + .stream() + // TODO: Version here should be read from UploadedSegmentMetadata. 
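Per the TODO above, the checkpoint read path rebuilds per-file metadata from the uploaded-segment map while pinning the Lucene version to Version.LATEST until it is persisted per file. The conversion shape can be sketched as follows, with hypothetical stand-ins for UploadedSegmentMetadata and StoreFileMetadata:

import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

final class CheckpointMetadataSketch {
    // Hypothetical stand-in for StoreFileMetadata (name, length, checksum, Lucene version).
    static final class FileMeta {
        final String name; final long length; final String checksum; final String version;
        FileMeta(String name, long length, String checksum, String version) {
            this.name = name; this.length = length; this.checksum = checksum; this.version = version;
        }
        String name() { return name; }
    }

    // Hypothetical stand-in for UploadedSegmentMetadata with the two fields used here.
    static final class Uploaded {
        final String name; final long length; final String checksum;
        Uploaded(String name, long length, String checksum) {
            this.name = name; this.length = length; this.checksum = checksum;
        }
    }

    static Map<String, FileMeta> toFileMetadata(Map<String, Uploaded> uploaded) {
        return uploaded.values().stream()
            // Version pinned to a placeholder until it is persisted per file (see the TODO above).
            .map(u -> new FileMeta(u.name, u.length, u.checksum, "LATEST"))
            .collect(Collectors.toMap(FileMeta::name, Function.identity()));
    }
}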
+ .map( + entry -> new StoreFileMetadata(entry.getKey(), entry.getValue().getLength(), entry.getValue().getChecksum(), Version.LATEST) + ) + .collect(Collectors.toMap(StoreFileMetadata::name, Function.identity())); + } } diff --git a/server/src/main/java/org/opensearch/index/translog/DefaultTranslogDeletionPolicy.java b/server/src/main/java/org/opensearch/index/translog/DefaultTranslogDeletionPolicy.java index efc762ef00d52..05049e5d07373 100644 --- a/server/src/main/java/org/opensearch/index/translog/DefaultTranslogDeletionPolicy.java +++ b/server/src/main/java/org/opensearch/index/translog/DefaultTranslogDeletionPolicy.java @@ -14,7 +14,7 @@ /** * Default implementation for the {@link TranslogDeletionPolicy}. Plugins can override the default behaviour * via the {@link org.opensearch.plugins.EnginePlugin#getCustomTranslogDeletionPolicyFactory()}. - * + *
* <p>
* The default policy uses the total number, size in bytes, and maximum age of files. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java index 7014bcabe5cfe..5d469c9cc8f25 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java @@ -8,16 +8,17 @@ package org.opensearch.index.translog; -import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; +import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.LifecycleAware; import org.opensearch.index.seqno.LocalCheckpointTracker; import org.opensearch.index.translog.listener.TranslogEventListener; +import org.opensearch.index.translog.transfer.TranslogUploadFailedException; import java.io.Closeable; import java.io.IOException; @@ -43,7 +44,7 @@ public class InternalTranslogManager implements TranslogManager, Closeable { private final AtomicBoolean pendingTranslogRecovery = new AtomicBoolean(false); private final TranslogEventListener translogEventListener; private final Supplier<LocalCheckpointTracker> localCheckpointTrackerSupplier; - private static final Logger logger = LogManager.getLogger(InternalTranslogManager.class); + private final Logger logger; public AtomicBoolean getPendingTranslogRecovery() { return pendingTranslogRecovery; } @@ -80,17 +81,21 @@ public InternalTranslogManager( assert pendingTranslogRecovery.get() == false : "translog recovery can't be pending before we set it"; // don't allow commits until we are done with recovering pendingTranslogRecovery.set(true); + this.logger = Loggers.getLogger(getClass(), shardId); } /** * Rolls the translog generation and cleans up unneeded files.
*/ @Override - public void rollTranslogGeneration() throws TranslogException { + public void rollTranslogGeneration() throws TranslogException, IOException { try (ReleasableLock ignored = readLock.acquire()) { engineLifeCycleAware.ensureOpen(); translog.rollGeneration(); translog.trimUnreferencedReaders(); + } catch (TranslogUploadFailedException e) { + // Do not trigger the translogEventListener as it fails the Engine while this is only an issue with remote upload + throw e; } catch (AlreadyClosedException e) { translogEventListener.onFailure("translog roll generation failed", e); throw e; @@ -424,10 +429,10 @@ public String getTranslogUUID() { * @return if the translog should be flushed */ public boolean shouldPeriodicallyFlush(long localCheckpointOfLastCommit, long flushThreshold) { - final long translogGenerationOfLastCommit = translog.getMinGenerationForSeqNo( - localCheckpointOfLastCommit + 1 - ).translogFileGeneration; - if (translog.sizeInBytesByMinGen(translogGenerationOfLastCommit) < flushThreshold) { + // This is the minimum seqNo that is referred in translog and considered for calculating translog size + long minTranslogRefSeqNo = translog.getMinUnreferencedSeqNoInSegments(localCheckpointOfLastCommit + 1); + final long minReferencedTranslogGeneration = translog.getMinGenerationForSeqNo(minTranslogRefSeqNo).translogFileGeneration; + if (translog.sizeInBytesByMinGen(minReferencedTranslogGeneration) < flushThreshold) { return false; } /* @@ -448,7 +453,7 @@ public boolean shouldPeriodicallyFlush(long localCheckpointOfLastCommit, long fl final long translogGenerationOfNewCommit = translog.getMinGenerationForSeqNo( localCheckpointTrackerSupplier.get().getProcessedCheckpoint() + 1 ).translogFileGeneration; - return translogGenerationOfLastCommit < translogGenerationOfNewCommit + return minReferencedTranslogGeneration < translogGenerationOfNewCommit || localCheckpointTrackerSupplier.get().getProcessedCheckpoint() == localCheckpointTrackerSupplier.get().getMaxSeqNo(); } diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java index 339e16db6f360..1e2cb388e690e 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java @@ -8,6 +8,7 @@ package org.opensearch.index.translog; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryMissingException; @@ -31,10 +32,13 @@ public class RemoteBlobStoreInternalTranslogFactory implements TranslogFactory { private final ThreadPool threadPool; + private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; + public RemoteBlobStoreInternalTranslogFactory( Supplier repositoriesServiceSupplier, ThreadPool threadPool, - String repositoryName + String repositoryName, + RemoteTranslogTransferTracker remoteTranslogTransferTracker ) { Repository repository; try { @@ -44,6 +48,7 @@ public RemoteBlobStoreInternalTranslogFactory( } this.repository = repository; this.threadPool = threadPool; + this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; } @Override @@ -68,7 +73,8 @@ public Translog newTranslog( persistedSequenceNumberConsumer, blobStoreRepository, threadPool, - 
primaryModeSupplier + primaryModeSupplier, + remoteTranslogTransferTracker ); } diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index b23374a2cce3b..65d16e213cad1 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -11,12 +11,12 @@ import org.apache.logging.log4j.Logger; import org.opensearch.common.SetOnce; import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.index.translog.transfer.FileTransferTracker; import org.opensearch.index.translog.transfer.TransferSnapshot; @@ -28,8 +28,10 @@ import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.threadpool.ThreadPool; +import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.Files; +import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.util.HashSet; import java.util.Locale; @@ -55,6 +57,7 @@ public class RemoteFsTranslog extends Translog { private final TranslogTransferManager translogTransferManager; private final FileTransferTracker fileTransferTracker; private final BooleanSupplier primaryModeSupplier; + private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; private volatile long maxRemoteTranslogGenerationUploaded; private volatile long minSeqNoToKeep; @@ -66,6 +69,7 @@ public class RemoteFsTranslog extends Translog { private final SetOnce olderPrimaryCleaned = new SetOnce<>(); private static final int REMOTE_DELETION_PERMITS = 2; + private static final int DOWNLOAD_RETRIES = 2; public static final String TRANSLOG = "translog"; // Semaphore used to allow only single remote generation to happen at a time @@ -80,17 +84,26 @@ public RemoteFsTranslog( LongConsumer persistedSequenceNumberConsumer, BlobStoreRepository blobStoreRepository, ThreadPool threadPool, - BooleanSupplier primaryModeSupplier + BooleanSupplier primaryModeSupplier, + RemoteTranslogTransferTracker remoteTranslogTransferTracker ) throws IOException { super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier, persistedSequenceNumberConsumer); logger = Loggers.getLogger(getClass(), shardId); this.blobStoreRepository = blobStoreRepository; this.primaryModeSupplier = primaryModeSupplier; - fileTransferTracker = new FileTransferTracker(shardId); - this.translogTransferManager = buildTranslogTransferManager(blobStoreRepository, threadPool, shardId, fileTransferTracker); + this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; + fileTransferTracker = new FileTransferTracker(shardId, remoteTranslogTransferTracker); + this.translogTransferManager = buildTranslogTransferManager( + blobStoreRepository, + threadPool, + shardId, + fileTransferTracker, + remoteTranslogTransferTracker + ); try { download(translogTransferManager, location, logger); Checkpoint checkpoint = readCheckpoint(location); + logger.info("Downloaded data from remote translog till maxSeqNo = {}", 
checkpoint.maxSeqNo); this.readers.addAll(recoverFromFiles(checkpoint)); if (readers.isEmpty()) { String errorMsg = String.format(Locale.ROOT, "%s at least one reader must be recovered", shardId); @@ -124,6 +137,11 @@ public RemoteFsTranslog( } } + // visible for testing + RemoteTranslogTransferTracker getRemoteTranslogTracker() { + return remoteTranslogTransferTracker; + } + public static void download(Repository repository, ShardId shardId, ThreadPool threadPool, Path location, Logger logger) throws IOException { assert repository instanceof BlobStoreRepository : String.format( @@ -132,32 +150,77 @@ public static void download(Repository repository, ShardId shardId, ThreadPool t shardId ); BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; - FileTransferTracker fileTransferTracker = new FileTransferTracker(shardId); + // We use a dummy stats tracker to ensure the flow doesn't break. + // TODO: To be revisited as part of https://github.com/opensearch-project/OpenSearch/issues/7567 + RemoteTranslogTransferTracker remoteTranslogTransferTracker = new RemoteTranslogTransferTracker(shardId, 1000); + FileTransferTracker fileTransferTracker = new FileTransferTracker(shardId, remoteTranslogTransferTracker); TranslogTransferManager translogTransferManager = buildTranslogTransferManager( blobStoreRepository, threadPool, shardId, - fileTransferTracker + fileTransferTracker, + remoteTranslogTransferTracker ); RemoteFsTranslog.download(translogTransferManager, location, logger); + logger.trace(remoteTranslogTransferTracker.toString()); + } + + static void download(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { + /* + In primary-to-primary relocation, there can be concurrent upload and download of the translog. + While translog files are being downloaded by the new primary, they might be deleted by the old primary. + Hence we retry if tlog/ckp files are not found. + + This doesn't happen in the last download, where it is ensured that the older primary has stopped modifying tlog data.
+ */ + IOException ex = null; + for (int i = 0; i <= DOWNLOAD_RETRIES; i++) { + boolean success = false; + long startTimeMs = System.currentTimeMillis(); + try { + downloadOnce(translogTransferManager, location, logger); + success = true; + return; + } catch (FileNotFoundException | NoSuchFileException e) { + // continue till download retries + ex = e; + } finally { + logger.trace("downloadOnce success={} timeElapsed={}", success, (System.currentTimeMillis() - startTimeMs)); + } + } + logger.info("Exhausted all download retries during translog/checkpoint file download"); + throw ex; } - public static void download(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { - logger.trace("Downloading translog files from remote"); + static private void downloadOnce(TranslogTransferManager translogTransferManager, Path location, Logger logger) throws IOException { + logger.debug("Downloading translog files from remote"); + RemoteTranslogTransferTracker statsTracker = translogTransferManager.getRemoteTranslogTransferTracker(); + long prevDownloadBytesSucceeded = statsTracker.getDownloadBytesSucceeded(); + long prevDownloadTimeInMillis = statsTracker.getTotalDownloadTimeInMillis(); TranslogTransferMetadata translogMetadata = translogTransferManager.readMetadata(); if (translogMetadata != null) { if (Files.notExists(location)) { Files.createDirectories(location); } + // Delete translog files on local before downloading from remote for (Path file : FileSystemUtils.files(location)) { Files.delete(file); } + Map generationToPrimaryTermMapper = translogMetadata.getGenerationToPrimaryTermMapper(); for (long i = translogMetadata.getGeneration(); i >= translogMetadata.getMinTranslogGeneration(); i--) { String generation = Long.toString(i); translogTransferManager.downloadTranslog(generationToPrimaryTermMapper.get(generation), generation, location); } + logger.info( + "Downloaded translog and checkpoint files from={} to={}", + translogMetadata.getMinTranslogGeneration(), + translogMetadata.getGeneration() + ); + + statsTracker.recordDownloadStats(prevDownloadBytesSucceeded, prevDownloadTimeInMillis); + // We copy the latest generation .ckp file to translog.ckp so that flows that depend on // existence of translog.ckp file work in the same way Files.copy( @@ -165,34 +228,31 @@ public static void download(TranslogTransferManager translogTransferManager, Pat location.resolve(Translog.CHECKPOINT_FILE_NAME) ); } - logger.trace("Downloaded translog files from remote"); + logger.debug("downloadOnce execution completed"); } public static TranslogTransferManager buildTranslogTransferManager( BlobStoreRepository blobStoreRepository, ThreadPool threadPool, ShardId shardId, - FileTransferTracker fileTransferTracker + FileTransferTracker fileTransferTracker, + RemoteTranslogTransferTracker remoteTranslogTransferTracker ) { return new TranslogTransferManager( shardId, new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool), blobStoreRepository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())).add(TRANSLOG), - fileTransferTracker + fileTransferTracker, + remoteTranslogTransferTracker ); } @Override public boolean ensureSynced(Location location) throws IOException { - try (ReleasableLock ignored = writeLock.acquire()) { - assert location.generation <= current.getGeneration(); - if (location.generation == current.getGeneration()) { - ensureOpen(); - return prepareAndUpload(primaryTermSupplier.getAsLong(), location.generation); - } - 
} catch (final Exception ex) { - closeOnTragicEvent(ex); - throw ex; + assert location.generation <= current.getGeneration(); + if (location.generation == current.getGeneration()) { + ensureOpen(); + return prepareAndUpload(primaryTermSupplier.getAsLong(), location.generation); } return false; } @@ -207,9 +267,13 @@ public void rollGeneration() throws IOException { } private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOException { + long maxSeqNo = -1; try (Releasable ignored = writeLock.acquire()) { if (generation == null || generation == current.getGeneration()) { try { + if (closed.get() == false) { + maxSeqNo = getMaxSeqNo(); + } final TranslogReader reader = current.closeIntoReader(); readers.add(reader); copyCheckpointTo(location.resolve(getCommitCheckpointFileName(current.getGeneration()))); @@ -217,6 +281,11 @@ private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOExc logger.trace("Creating new writer for gen: [{}]", current.getGeneration() + 1); current = createWriter(current.getGeneration() + 1); } + assert writeLock.isHeldByCurrentThread() : "Write lock must be held before we acquire the read lock"; + // Here we are downgrading the write lock by acquiring the read lock and releasing the write lock + // This ensures that other threads can still acquire the read locks while also protecting the + // readers and writer to not be mutated any further. + readLock.acquire(); } catch (final Exception e) { tragedy.setTragicException(e); closeOnTragicEvent(e); @@ -225,7 +294,10 @@ private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOExc } else if (generation < current.getGeneration()) { return false; } + } + assert readLock.isHeldByCurrentThread() == true; + try (Releasable ignored = readLock; Releasable ignoredGenLock = deletionPolicy.acquireTranslogGen(getMinFileGeneration())) { // Do we need remote writes in sync fashion ? // If we don't , we should swallow FileAlreadyExistsException while writing to remote store // and also verify for same during primary-primary relocation @@ -233,24 +305,24 @@ private boolean prepareAndUpload(Long primaryTerm, Long generation) throws IOExc // is not updated in remote translog except in primary to primary recovery. if (generation == null) { if (closed.get() == false) { - return upload(primaryTerm, current.getGeneration() - 1); + return upload(primaryTerm, current.getGeneration() - 1, maxSeqNo); } else { - return upload(primaryTerm, current.getGeneration()); + return upload(primaryTerm, current.getGeneration(), maxSeqNo); } } else { - return upload(primaryTerm, generation); + return upload(primaryTerm, generation, maxSeqNo); } } } - private boolean upload(Long primaryTerm, Long generation) throws IOException { + private boolean upload(long primaryTerm, long generation, long maxSeqNo) throws IOException { // During primary relocation (primary-primary peer recovery), both the old and the new primary have engine // created with the RemoteFsTranslog. Both primaries are equipped to upload the translogs. The primary mode check // below ensures that the real primary only is uploading. Before the primary mode is set as true for the new // primary, the engine is reset to InternalEngine which also initialises the RemoteFsTranslog which in turns // downloads all the translogs from remote store and does a flush before the relocation finishes. 
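The rewritten prepareAndUpload rolls the generation under the write lock and then downgrades to the read lock before uploading, so the readers list and writer cannot be mutated while other readers continue unblocked. This relies on the standard downgrade idiom, sketched here with a plain ReentrantReadWriteLock; the production code achieves the same effect through OpenSearch's ReleasableLock wrappers:

import java.util.concurrent.locks.ReentrantReadWriteLock;

final class LockDowngradeSketch {
    private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();

    void rollThenUpload() {
        rwLock.writeLock().lock();
        try {
            // ... mutate state: close the current writer, open the next generation ...
            // Downgrade: acquire the read lock before releasing the write lock,
            // so no other writer can sneak in between the two steps.
            rwLock.readLock().lock();
        } finally {
            rwLock.writeLock().unlock();
        }
        try {
            // ... upload under the read lock; state can no longer be mutated ...
        } finally {
            rwLock.readLock().unlock();
        }
    }
}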
if (primaryModeSupplier.getAsBoolean() == false) { - logger.trace("skipped uploading translog for {} {}", primaryTerm, generation); + logger.debug("skipped uploading translog for {} {}", primaryTerm, generation); // NO-OP return true; } @@ -261,32 +333,14 @@ private boolean upload(Long primaryTerm, Long generation) throws IOException { generation, location, readers, - Translog::getCommitCheckpointFileName + Translog::getCommitCheckpointFileName, + config.getNodeId() ).build() ) { - Releasable transferReleasable = Releasables.wrap(deletionPolicy.acquireTranslogGen(getMinFileGeneration())); - return translogTransferManager.transferSnapshot(transferSnapshotProvider, new TranslogTransferListener() { - @Override - - public void onUploadComplete(TransferSnapshot transferSnapshot) throws IOException { - transferReleasable.close(); - closeFilesIfNoPendingRetentionLocks(); - maxRemoteTranslogGenerationUploaded = generation; - minRemoteGenReferenced = getMinFileGeneration(); - logger.trace("uploaded translog for {} {} ", primaryTerm, generation); - } - - @Override - public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) throws IOException { - transferReleasable.close(); - closeFilesIfNoPendingRetentionLocks(); - if (ex instanceof IOException) { - throw (IOException) ex; - } else { - throw (RuntimeException) ex; - } - } - }); + return translogTransferManager.transferSnapshot( + transferSnapshotProvider, + new RemoteFsTranslogTransferListener(generation, primaryTerm, maxSeqNo) + ); } } @@ -307,14 +361,8 @@ private boolean syncToDisk() throws IOException { @Override public void sync() throws IOException { - try { - if (syncToDisk() || syncNeeded()) { - prepareAndUpload(primaryTermSupplier.getAsLong(), null); - } - } catch (final Exception e) { - tragedy.setTragicException(e); - closeOnTragicEvent(e); - throw e; + if (syncToDisk() || syncNeeded()) { + prepareAndUpload(primaryTermSupplier.getAsLong(), null); } } @@ -388,7 +436,7 @@ public void trimUnreferencedReaders() throws IOException { // cleans up remote translog files not referenced in latest uploaded metadata. // This enables us to restore translog from the metadata in case of failover or relocation. Set generationsToDelete = new HashSet<>(); - for (long generation = minRemoteGenReferenced - 1; generation >= 0; generation--) { + for (long generation = minRemoteGenReferenced - 1 - indexSettings().getRemoteTranslogExtraKeep(); generation >= 0; generation--) { if (fileTransferTracker.uploaded(Translog.getFilename(generation)) == false) { break; } @@ -439,12 +487,16 @@ private void deleteStaleRemotePrimaryTerms() { public static void cleanup(Repository repository, ShardId shardId, ThreadPool threadPool) throws IOException { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; - FileTransferTracker fileTransferTracker = new FileTransferTracker(shardId); + // We use a dummy stats tracker to ensure the flow doesn't break. 
+ // TODO: To be revisited as part of https://github.com/opensearch-project/OpenSearch/issues/7567 + RemoteTranslogTransferTracker remoteTranslogTransferTracker = new RemoteTranslogTransferTracker(shardId, 1000); + FileTransferTracker fileTransferTracker = new FileTransferTracker(shardId, remoteTranslogTransferTracker); TranslogTransferManager translogTransferManager = buildTranslogTransferManager( blobStoreRepository, threadPool, shardId, - fileTransferTracker + fileTransferTracker, + remoteTranslogTransferTracker ); // clean up all remote translog files translogTransferManager.deleteTranslogFiles(); @@ -459,4 +511,61 @@ protected void onDelete() { // clean up all remote translog files translogTransferManager.delete(); } + + // Visible for testing + boolean isRemoteGenerationDeletionPermitsAvailable() { + return remoteGenerationDeletionPermits.availablePermits() == REMOTE_DELETION_PERMITS; + } + + /** + * TranslogTransferListener implementation for RemoteFsTranslog + * + * @opensearch.internal + */ + private class RemoteFsTranslogTransferListener implements TranslogTransferListener { + + /** + * Generation for the translog + */ + private final long generation; + + /** + * Primary Term for the translog + */ + private final long primaryTerm; + + private final long maxSeqNo; + + RemoteFsTranslogTransferListener(long generation, long primaryTerm, long maxSeqNo) { + this.generation = generation; + this.primaryTerm = primaryTerm; + this.maxSeqNo = maxSeqNo; + } + + @Override + public void onUploadComplete(TransferSnapshot transferSnapshot) throws IOException { + maxRemoteTranslogGenerationUploaded = generation; + minRemoteGenReferenced = getMinFileGeneration(); + logger.debug( + "Successfully uploaded translog for primary term = {}, generation = {}, maxSeqNo = {}", + primaryTerm, + generation, + maxSeqNo + ); + } + + @Override + public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) throws IOException { + if (ex instanceof IOException) { + throw (IOException) ex; + } else { + throw (RuntimeException) ex; + } + } + } + + @Override + public long getMinUnreferencedSeqNoInSegments(long minUnrefCheckpointInLastCommit) { + return minSeqNoToKeep; + } } diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteTranslogStats.java b/server/src/main/java/org/opensearch/index/translog/RemoteTranslogStats.java new file mode 100644 index 0000000000000..966f8ebc2875a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/RemoteTranslogStats.java @@ -0,0 +1,192 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.translog; + +import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; + +import java.io.IOException; +import java.util.Objects; + +/** + * Encapsulates the stats related to Remote Translog Store operations + * + * @opensearch.internal + */ +public class RemoteTranslogStats implements ToXContentFragment, Writeable { + /** + * Total number of Remote Translog Store uploads that have been started + */ + private long totalUploadsStarted; + + /** + * Total number of Remote Translog Store uploads that have failed. + */ + private long totalUploadsFailed; + + /** + * Total number of Remote Translog Store uploads that have been successful. + */ + private long totalUploadsSucceeded; + + /** + * Total number of byte uploads to Remote Translog Store that have been started. + */ + private long uploadBytesStarted; + + /** + * Total number of byte uploads to Remote Translog Store that have failed. + */ + private long uploadBytesFailed; + + /** + * Total number of byte uploads to Remote Translog Store that have been successful. + */ + private long uploadBytesSucceeded; + + static final String REMOTE_STORE = "remote_store"; + + public RemoteTranslogStats() {} + + public RemoteTranslogStats(StreamInput in) throws IOException { + this.totalUploadsStarted = in.readVLong(); + this.totalUploadsFailed = in.readVLong(); + this.totalUploadsSucceeded = in.readVLong(); + this.uploadBytesStarted = in.readVLong(); + this.uploadBytesFailed = in.readVLong(); + this.uploadBytesSucceeded = in.readVLong(); + } + + public RemoteTranslogStats(RemoteTranslogTransferTracker.Stats transferTrackerStats) { + this.totalUploadsStarted = transferTrackerStats.totalUploadsStarted; + this.totalUploadsFailed = transferTrackerStats.totalUploadsFailed; + this.totalUploadsSucceeded = transferTrackerStats.totalUploadsSucceeded; + this.uploadBytesStarted = transferTrackerStats.uploadBytesStarted; + this.uploadBytesFailed = transferTrackerStats.uploadBytesFailed; + this.uploadBytesSucceeded = transferTrackerStats.uploadBytesSucceeded; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(totalUploadsStarted); + out.writeVLong(totalUploadsFailed); + out.writeVLong(totalUploadsSucceeded); + out.writeVLong(uploadBytesStarted); + out.writeVLong(uploadBytesFailed); + out.writeVLong(uploadBytesSucceeded); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + RemoteTranslogStats other = (RemoteTranslogStats) obj; + + return this.totalUploadsStarted == other.totalUploadsStarted + && this.totalUploadsFailed == other.totalUploadsFailed + && this.totalUploadsSucceeded == other.totalUploadsSucceeded + && this.uploadBytesStarted == other.uploadBytesStarted + && this.uploadBytesFailed == other.uploadBytesFailed + && this.uploadBytesSucceeded == other.uploadBytesSucceeded; + } + + @Override + public int hashCode() { + return Objects.hash( + totalUploadsStarted, + totalUploadsFailed, + totalUploadsSucceeded, + uploadBytesStarted, + uploadBytesFailed, + 
uploadBytesSucceeded + ); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(REMOTE_STORE); + + builder.startObject(RemoteStoreStats.SubFields.UPLOAD); + addRemoteTranslogUploadStatsXContent(builder); + builder.endObject(); // translog.remote_store.upload + + builder.endObject(); // translog.remote_store + + return builder; + } + + public long getTotalUploadsStarted() { + return totalUploadsStarted; + } + + public long getTotalUploadsFailed() { + return totalUploadsFailed; + } + + public long getTotalUploadsSucceeded() { + return totalUploadsSucceeded; + } + + public long getUploadBytesStarted() { + return uploadBytesStarted; + } + + public long getUploadBytesFailed() { + return uploadBytesFailed; + } + + public long getUploadBytesSucceeded() { + return uploadBytesSucceeded; + } + + public void add(RemoteTranslogStats other) { + if (other == null) { + return; + } + + this.totalUploadsStarted += other.totalUploadsStarted; + this.totalUploadsFailed += other.totalUploadsFailed; + this.totalUploadsSucceeded += other.totalUploadsSucceeded; + this.uploadBytesStarted += other.uploadBytesStarted; + this.uploadBytesFailed += other.uploadBytesFailed; + this.uploadBytesSucceeded += other.uploadBytesSucceeded; + } + + void addRemoteTranslogUploadStatsXContent(XContentBuilder builder) throws IOException { + builder.startObject(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS); + builder.field(RemoteStoreStats.SubFields.STARTED, totalUploadsStarted) + .field(RemoteStoreStats.SubFields.FAILED, totalUploadsFailed) + .field(RemoteStoreStats.SubFields.SUCCEEDED, totalUploadsSucceeded); + builder.endObject(); + + builder.startObject(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_SIZE); + builder.humanReadableField( + RemoteStoreStats.SubFields.STARTED_BYTES, + RemoteStoreStats.SubFields.STARTED, + new ByteSizeValue(uploadBytesStarted) + ); + builder.humanReadableField( + RemoteStoreStats.SubFields.FAILED_BYTES, + RemoteStoreStats.SubFields.FAILED, + new ByteSizeValue(uploadBytesFailed) + ); + builder.humanReadableField( + RemoteStoreStats.SubFields.SUCCEEDED_BYTES, + RemoteStoreStats.SubFields.SUCCEEDED, + new ByteSizeValue(uploadBytesSucceeded) + ); + builder.endObject(); + } +} diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java index baa3737d576de..b44aa6e059224 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -840,7 +840,7 @@ public boolean ensureSynced(Stream locations) throws IOException { /** * Closes the translog if the current translog writer experienced a tragic exception. - * + *
* <p>
              * Note that in case this thread closes the translog it must not already be holding a read lock on the translog as it will acquire a * write lock in the course of closing the translog * @@ -1976,7 +1976,7 @@ static String createEmptyTranslog( /** * Creates a new empty translog within the specified {@code location} that contains the given {@code initialGlobalCheckpoint}, * {@code primaryTerm} and {@code translogUUID}. - * + *
* <p>
              * This method should be used directly under specific circumstances like for shards that will see no indexing. Specifying a non-unique * translog UUID could cause a lot of issues and that's why in all (but one) cases the method * {@link #createEmptyTranslog(Path, long, ShardId, long)} should be used instead. @@ -2034,4 +2034,8 @@ public static String createEmptyTranslog( writer.close(); return uuid; } + + public long getMinUnreferencedSeqNoInSegments(long minUnrefCheckpointInLastCommit) { + return minUnrefCheckpointInLastCommit; + } } diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java b/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java index cac88bee82a73..6e75ebd847b5e 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogConfig.java @@ -56,6 +56,7 @@ public final class TranslogConfig { private final ShardId shardId; private final Path translogPath; private final ByteSizeValue bufferSize; + private final String nodeId; /** * Creates a new TranslogConfig instance @@ -64,16 +65,24 @@ public final class TranslogConfig { * @param indexSettings the index settings used to set internal variables * @param bigArrays a bigArrays instance used for temporarily allocating write operations */ - public TranslogConfig(ShardId shardId, Path translogPath, IndexSettings indexSettings, BigArrays bigArrays) { - this(shardId, translogPath, indexSettings, bigArrays, DEFAULT_BUFFER_SIZE); + public TranslogConfig(ShardId shardId, Path translogPath, IndexSettings indexSettings, BigArrays bigArrays, String nodeId) { + this(shardId, translogPath, indexSettings, bigArrays, DEFAULT_BUFFER_SIZE, nodeId); } - TranslogConfig(ShardId shardId, Path translogPath, IndexSettings indexSettings, BigArrays bigArrays, ByteSizeValue bufferSize) { + TranslogConfig( + ShardId shardId, + Path translogPath, + IndexSettings indexSettings, + BigArrays bigArrays, + ByteSizeValue bufferSize, + String nodeId + ) { this.bufferSize = bufferSize; this.indexSettings = indexSettings; this.shardId = shardId; this.translogPath = translogPath; this.bigArrays = bigArrays; + this.nodeId = nodeId; } /** @@ -110,4 +119,8 @@ public Path getTranslogPath() { public ByteSizeValue getBufferSize() { return bufferSize; } + + public String getNodeId() { + return nodeId; + } } diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java b/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java index 42bda11d75783..7b5be9505f27a 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogHeader.java @@ -147,7 +147,11 @@ static TranslogHeader read(final String translogUUID, final Path path, final Fil if (actualUUID.bytesEquals(expectedUUID) == false) { throw new TranslogCorruptedException( path.toString(), - "expected shard UUID " + expectedUUID + " but got: " + actualUUID + " this translog file belongs to a different translog" + "expected shard UUID " + + translogUUID + + " but got: " + + translogHeader.translogUUID + + " this translog file belongs to a different translog" ); } return translogHeader; diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java index 303e84dc2b228..4568a3e3d578a 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java +++ 
b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java @@ -21,7 +21,7 @@ public interface TranslogManager { /** * Rolls the translog generation and cleans up unneeded files. */ - void rollTranslogGeneration() throws TranslogException; + void rollTranslogGeneration() throws TranslogException, IOException; /** * Performs recovery from the transaction log up to {@code recoverUpToSeqNo} (inclusive). diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogStats.java b/server/src/main/java/org/opensearch/index/translog/TranslogStats.java index cf279334c7557..a4699cea671a0 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogStats.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogStats.java @@ -31,6 +31,7 @@ package org.opensearch.index.translog; +import org.opensearch.Version; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -48,14 +49,21 @@ * @opensearch.internal */ public class TranslogStats implements Writeable, ToXContentFragment { - + private static final String TRANSLOG = "translog"; private long translogSizeInBytes; private int numberOfOperations; private long uncommittedSizeInBytes; private int uncommittedOperations; private long earliestLastModifiedAge; - public TranslogStats() {} + /** + * Stats related to the Remote Translog Store operations + */ + private final RemoteTranslogStats remoteTranslogStats; + + public TranslogStats() { + remoteTranslogStats = new RemoteTranslogStats(); + } public TranslogStats(StreamInput in) throws IOException { numberOfOperations = in.readVInt(); @@ -63,6 +71,9 @@ public TranslogStats(StreamInput in) throws IOException { uncommittedOperations = in.readVInt(); uncommittedSizeInBytes = in.readVLong(); earliestLastModifiedAge = in.readVLong(); + remoteTranslogStats = in.getVersion().onOrAfter(Version.V_2_10_0) + ?
in.readOptionalWriteable(RemoteTranslogStats::new) + : new RemoteTranslogStats(); } public TranslogStats( @@ -87,27 +98,37 @@ public TranslogStats( if (earliestLastModifiedAge < 0) { throw new IllegalArgumentException("earliestLastModifiedAge must be >= 0"); } + this.numberOfOperations = numberOfOperations; this.translogSizeInBytes = translogSizeInBytes; this.uncommittedSizeInBytes = uncommittedSizeInBytes; this.uncommittedOperations = uncommittedOperations; this.earliestLastModifiedAge = earliestLastModifiedAge; + this.remoteTranslogStats = new RemoteTranslogStats(); + } + + public void addRemoteTranslogStats(RemoteTranslogStats remoteTranslogStats) { + if (this.remoteTranslogStats != null) { + this.remoteTranslogStats.add(remoteTranslogStats); + } } - public void add(TranslogStats translogStats) { - if (translogStats == null) { + public void add(TranslogStats other) { + if (other == null) { return; } - this.numberOfOperations += translogStats.numberOfOperations; - this.translogSizeInBytes += translogStats.translogSizeInBytes; - this.uncommittedOperations += translogStats.uncommittedOperations; - this.uncommittedSizeInBytes += translogStats.uncommittedSizeInBytes; + this.numberOfOperations += other.numberOfOperations; + this.translogSizeInBytes += other.translogSizeInBytes; + this.uncommittedOperations += other.uncommittedOperations; + this.uncommittedSizeInBytes += other.uncommittedSizeInBytes; if (this.earliestLastModifiedAge == 0) { - this.earliestLastModifiedAge = translogStats.earliestLastModifiedAge; + this.earliestLastModifiedAge = other.earliestLastModifiedAge; } else { - this.earliestLastModifiedAge = Math.min(this.earliestLastModifiedAge, translogStats.earliestLastModifiedAge); + this.earliestLastModifiedAge = Math.min(this.earliestLastModifiedAge, other.earliestLastModifiedAge); } + + addRemoteTranslogStats(other.remoteTranslogStats); } public long getTranslogSizeInBytes() { @@ -132,15 +153,20 @@ public long getEarliestLastModifiedAge() { return earliestLastModifiedAge; } + public RemoteTranslogStats getRemoteTranslogStats() { + return remoteTranslogStats; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject("translog"); - builder.field("operations", numberOfOperations); - builder.humanReadableField("size_in_bytes", "size", new ByteSizeValue(translogSizeInBytes)); - builder.field("uncommitted_operations", uncommittedOperations); - builder.humanReadableField("uncommitted_size_in_bytes", "uncommitted_size", new ByteSizeValue(uncommittedSizeInBytes)); - builder.field("earliest_last_modified_age", earliestLastModifiedAge); + builder.startObject(TRANSLOG); + addLocalTranslogStatsXContent(builder); + if (remoteTranslogStats != null) { + builder = remoteTranslogStats.toXContent(builder, params); + } + builder.endObject(); + return builder; } @@ -156,5 +182,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(uncommittedOperations); out.writeVLong(uncommittedSizeInBytes); out.writeVLong(earliestLastModifiedAge); + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { + out.writeOptionalWriteable(remoteTranslogStats); + } + } + + private void addLocalTranslogStatsXContent(XContentBuilder builder) throws IOException { + builder.field("operations", numberOfOperations); + builder.humanReadableField("size_in_bytes", "size", new ByteSizeValue(translogSizeInBytes)); + builder.field("uncommitted_operations", uncommittedOperations); + builder.humanReadableField("uncommitted_size_in_bytes", 
"uncommitted_size", new ByteSizeValue(uncommittedSizeInBytes)); + builder.field("earliest_last_modified_age", earliestLastModifiedAge); } } diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java b/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java index a5f0607431a8b..3f33c155be15e 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogWriter.java @@ -376,7 +376,7 @@ synchronized boolean assertNoSeqAbove(long belowTerm, long aboveSeqNo) { /** * write all buffered ops to disk and fsync file. - * + *
* <p>
              * Note: any exception during the sync process will be interpreted as a tragic exception and the writer will be closed before * raising the exception. * @return true if this call caused an actual sync operation diff --git a/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java b/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java index 0d85123b60c75..25fcdc614172a 100644 --- a/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java +++ b/server/src/main/java/org/opensearch/index/translog/TruncateTranslogAction.java @@ -194,7 +194,8 @@ private boolean isTranslogClean(ShardPath shardPath, ClusterState clusterState, shardPath.getShardId(), translogPath, indexSettings, - BigArrays.NON_RECYCLING_INSTANCE + BigArrays.NON_RECYCLING_INSTANCE, + "" ); long primaryTerm = indexSettings.getIndexMetadata().primaryTerm(shardPath.getShardId().id()); // We open translog to check for corruption, do not clean anything. diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java index f9b126e2de4a4..82dd6301ef79f 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java @@ -12,10 +12,11 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionRunnable; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; +import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; @@ -92,7 +93,7 @@ public void uploadBlobs( ) { fileSnapshots.forEach(fileSnapshot -> { BlobPath blobPath = blobPaths.get(fileSnapshot.getPrimaryTerm()); - if (!(blobStore.blobContainer(blobPath) instanceof VerifyingMultiStreamBlobContainer)) { + if (!(blobStore.blobContainer(blobPath) instanceof AsyncMultiStreamBlobContainer)) { uploadBlob(ThreadPool.Names.TRANSLOG_TRANSFER, fileSnapshot, blobPath, listener, writePriority); } else { uploadBlob(fileSnapshot, listener, blobPath, writePriority); @@ -114,6 +115,11 @@ private void uploadBlob( try (FileChannel channel = channelFactory.open(fileSnapshot.getPath(), StandardOpenOption.READ)) { contentLength = channel.size(); } + boolean remoteIntegrityEnabled = false; + BlobContainer blobContainer = blobStore.blobContainer(blobPath); + if (blobContainer instanceof AsyncMultiStreamBlobContainer) { + remoteIntegrityEnabled = ((AsyncMultiStreamBlobContainer) blobContainer).remoteIntegrityCheckSupported(); + } RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( fileSnapshot.getName(), fileSnapshot.getName(), @@ -122,7 +128,7 @@ private void uploadBlob( writePriority, (size, position) -> new OffsetRangeFileInputStream(fileSnapshot.getPath(), size, position), Objects.requireNonNull(fileSnapshot.getChecksum()), - blobStore.blobContainer(blobPath) instanceof VerifyingMultiStreamBlobContainer + 
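The move from VerifyingMultiStreamBlobContainer to AsyncMultiStreamBlobContainer comes with a capability probe: integrity verification is requested only when the container both supports multi-stream uploads and reports server-side integrity checking, which the hunk above captures in remoteIntegrityEnabled. The probe pattern in isolation, with hypothetical stand-in interfaces:

// Hypothetical stand-ins illustrating the capability probe above.
interface BlobContainerSketch {}

interface AsyncMultiStreamSketch extends BlobContainerSketch {
    boolean remoteIntegrityCheckSupported();
}

final class IntegrityProbe {
    static boolean remoteIntegrityEnabled(BlobContainerSketch container) {
        // Enable integrity verification only when the container advertises support.
        return container instanceof AsyncMultiStreamSketch
            && ((AsyncMultiStreamSketch) container).remoteIntegrityCheckSupported();
    }
}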
remoteIntegrityEnabled ); ActionListener completionListener = ActionListener.wrap(resp -> listener.onResponse(fileSnapshot), ex -> { logger.error(() -> new ParameterizedMessage("Failed to upload blob {}", fileSnapshot.getName()), ex); @@ -138,7 +144,7 @@ private void uploadBlob( }); WriteContext writeContext = remoteTransferContainer.createWriteContext(); - ((VerifyingMultiStreamBlobContainer) blobStore.blobContainer(blobPath)).asyncBlobUpload(writeContext, completionListener); + ((AsyncMultiStreamBlobContainer) blobStore.blobContainer(blobPath)).asyncBlobUpload(writeContext, completionListener); } catch (Exception e) { logger.error(() -> new ParameterizedMessage("Failed to upload blob {}", fileSnapshot.getName()), e); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java b/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java index 406533561a798..9c2304f809f46 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/FileTransferTracker.java @@ -9,11 +9,15 @@ package org.opensearch.index.translog.transfer; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; import org.opensearch.index.translog.transfer.listener.FileTransferListener; +import java.io.IOException; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; @@ -26,14 +30,43 @@ public class FileTransferTracker implements FileTransferListener { private final ConcurrentHashMap fileTransferTracker; private final ShardId shardId; + private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; + private Map bytesForTlogCkpFileToUpload; + private long fileTransferStartTime = -1; - public FileTransferTracker(ShardId shardId) { + public FileTransferTracker(ShardId shardId, RemoteTranslogTransferTracker remoteTranslogTransferTracker) { this.shardId = shardId; this.fileTransferTracker = new ConcurrentHashMap<>(); + this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; + } + + void recordFileTransferStartTime(long uploadStartTime) { + // Recording the start time more than once for a sync is invalid + if (fileTransferStartTime == -1) { + fileTransferStartTime = uploadStartTime; + } + } + + void recordBytesForFiles(Set toUpload) { + bytesForTlogCkpFileToUpload = new HashMap<>(); + toUpload.forEach(file -> { + try { + bytesForTlogCkpFileToUpload.put(file.getName(), file.getContentLength()); + } catch (IOException ignored) { + bytesForTlogCkpFileToUpload.put(file.getName(), 0L); + } + }); + } + + long getTotalBytesToUpload() { + return bytesForTlogCkpFileToUpload.values().stream().reduce(0L, Long::sum); } @Override public void onSuccess(TransferFileSnapshot fileSnapshot) { + long durationInMillis = (System.nanoTime() - fileTransferStartTime) / 1_000_000L; + remoteTranslogTransferTracker.addUploadTimeInMillis(durationInMillis); + remoteTranslogTransferTracker.addUploadBytesSucceeded(bytesForTlogCkpFileToUpload.get(fileSnapshot.getName())); add(fileSnapshot.getName(), TransferState.SUCCESS); } @@ -53,6 +86,9 @@ private void add(String file, TransferState targetState) { @Override public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) { + long 
durationInMillis = (System.nanoTime() - fileTransferStartTime) / 1_000_000L; + remoteTranslogTransferTracker.addUploadTimeInMillis(durationInMillis); + remoteTranslogTransferTracker.addUploadBytesFailed(bytesForTlogCkpFileToUpload.get(fileSnapshot.getName())); add(fileSnapshot.getName(), TransferState.FAILED); } diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java index 10dec13c81e1a..fb78731246a07 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogCheckpointTransferSnapshot.java @@ -40,11 +40,14 @@ public class TranslogCheckpointTransferSnapshot implements TransferSnapshot, Clo private final long primaryTerm; private long minTranslogGeneration; - TranslogCheckpointTransferSnapshot(long primaryTerm, long generation, int size) { + private String nodeId; + + TranslogCheckpointTransferSnapshot(long primaryTerm, long generation, int size, String nodeId) { translogCheckpointFileInfoTupleSet = new HashSet<>(size); this.size = size; this.generation = generation; this.primaryTerm = primaryTerm; + this.nodeId = nodeId; } private void add(TranslogFileSnapshot translogFileSnapshot, CheckpointFileSnapshot checkPointFileSnapshot) { @@ -63,7 +66,13 @@ public Set getTranslogFileSnapshots() { @Override public TranslogTransferMetadata getTranslogTransferMetadata() { - return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, translogCheckpointFileInfoTupleSet.size() * 2); + return new TranslogTransferMetadata( + primaryTerm, + generation, + minTranslogGeneration, + translogCheckpointFileInfoTupleSet.size() * 2, + nodeId + ); } @Override @@ -110,19 +119,22 @@ public static class Builder { private final List readers; private final Function checkpointGenFileNameMapper; private final Path location; + private final String nodeId; public Builder( long primaryTerm, long generation, Path location, List readers, - Function checkpointGenFileNameMapper + Function checkpointGenFileNameMapper, + String nodeId ) { this.primaryTerm = primaryTerm; this.generation = generation; this.readers = readers; this.checkpointGenFileNameMapper = checkpointGenFileNameMapper; this.location = location; + this.nodeId = nodeId; } public TranslogCheckpointTransferSnapshot build() throws IOException { @@ -134,7 +146,8 @@ public TranslogCheckpointTransferSnapshot build() throws IOException { TranslogCheckpointTransferSnapshot translogTransferSnapshot = new TranslogCheckpointTransferSnapshot( primaryTerm, generation, - readers.size() + readers.size(), + nodeId ); for (TranslogReader reader : readers) { final long readerGeneration = reader.getGeneration(); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index 1d42e8a546858..2f6055df87804 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -24,6 +24,8 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteStoreUtils; +import 
org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.transfer.listener.TranslogTransferListener; import org.opensearch.threadpool.ThreadPool; @@ -40,7 +42,6 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; import static org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; @@ -59,9 +60,12 @@ public class TranslogTransferManager { private final BlobPath remoteMetadataTransferPath; private final BlobPath remoteBaseTransferPath; private final FileTransferTracker fileTransferTracker; + private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; private static final long TRANSFER_TIMEOUT_IN_MILLIS = 30000; + private static final int METADATA_FILES_TO_FETCH = 10; + private final Logger logger; private final static String METADATA_DIR = "metadata"; private final static String DATA_DIR = "data"; @@ -76,7 +80,8 @@ public TranslogTransferManager( ShardId shardId, TransferService transferService, BlobPath remoteBaseTransferPath, - FileTransferTracker fileTransferTracker + FileTransferTracker fileTransferTracker, + RemoteTranslogTransferTracker remoteTranslogTransferTracker ) { this.shardId = shardId; this.transferService = transferService; @@ -85,6 +90,11 @@ public TranslogTransferManager( this.remoteMetadataTransferPath = remoteBaseTransferPath.add(METADATA_DIR); this.fileTransferTracker = fileTransferTracker; this.logger = Loggers.getLogger(getClass(), shardId); + this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; + } + + public RemoteTranslogTransferTracker getRemoteTranslogTransferTracker() { + return remoteTranslogTransferTracker; } public ShardId getShardId() { @@ -95,14 +105,22 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans throws IOException { List exceptionList = new ArrayList<>(transferSnapshot.getTranslogTransferMetadata().getCount()); Set toUpload = new HashSet<>(transferSnapshot.getTranslogTransferMetadata().getCount()); + long metadataBytesToUpload; + long metadataUploadStartTime; + long uploadStartTime; + long prevUploadBytesSucceeded = remoteTranslogTransferTracker.getUploadBytesSucceeded(); + long prevUploadTimeInMillis = remoteTranslogTransferTracker.getTotalUploadTimeInMillis(); + try { toUpload.addAll(fileTransferTracker.exclusionFilter(transferSnapshot.getTranslogFileSnapshots())); toUpload.addAll(fileTransferTracker.exclusionFilter((transferSnapshot.getCheckpointFileSnapshots()))); if (toUpload.isEmpty()) { logger.trace("Nothing to upload for transfer"); - translogTransferListener.onUploadComplete(transferSnapshot); return true; } + + fileTransferTracker.recordBytesForFiles(toUpload); + captureStatsBeforeUpload(); final CountDownLatch latch = new CountDownLatch(toUpload.size()); LatchedActionListener latchedActionListener = new LatchedActionListener<>( ActionListener.wrap(fileTransferTracker::onSuccess, ex -> { @@ -115,7 +133,8 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans ex ); FileTransferException e = (FileTransferException) ex; - fileTransferTracker.onFailure(e.getFileSnapshot(), ex); + TransferFileSnapshot file = e.getFileSnapshot(); + fileTransferTracker.onFailure(file, ex); exceptionList.add(ex); }), latch @@ -128,37 +147,92 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans ) ); + 
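transferSnapshot fans the file uploads out through a latched listener and then blocks with a fixed timeout, converting a timeout or interrupt into a single failure that carries the per-file exceptions as suppressed. A sketch of that coordination skeleton, with illustrative names and a plain IOException standing in for TranslogUploadFailedException:

import java.io.IOException;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

final class FanOutUploadSketch {
    // Each completed upload (success or failure) counts the latch down once;
    // failures are collected into the shared list by the upload callbacks.
    static void awaitUploads(CountDownLatch latch, long timeoutMillis, List<Exception> failures) throws IOException {
        try {
            if (latch.await(timeoutMillis, TimeUnit.MILLISECONDS) == false) {
                IOException ex = new IOException("Timed out waiting for uploads");
                failures.forEach(ex::addSuppressed);
                throw ex;
            }
        } catch (InterruptedException ie) {
            IOException ex = new IOException("Interrupted while uploading", ie);
            failures.forEach(ex::addSuppressed);
            Thread.currentThread().interrupt(); // restore the interrupt flag
            throw ex;
        }
    }
}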
uploadStartTime = System.nanoTime(); + // TODO: Ideally each file's upload start time should be when it is actually picked for upload + // https://github.com/opensearch-project/OpenSearch/issues/9729 + fileTransferTracker.recordFileTransferStartTime(uploadStartTime); transferService.uploadBlobs(toUpload, blobPathMap, latchedActionListener, WritePriority.HIGH); try { if (latch.await(TRANSFER_TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS) == false) { - Exception ex = new TimeoutException("Timed out waiting for transfer of snapshot " + transferSnapshot + " to complete"); + Exception ex = new TranslogUploadFailedException( + "Timed out waiting for transfer of snapshot " + transferSnapshot + " to complete" + ); exceptionList.forEach(ex::addSuppressed); throw ex; } } catch (InterruptedException ex) { - exceptionList.forEach(ex::addSuppressed); + Exception exception = new TranslogUploadFailedException("Failed to upload " + transferSnapshot, ex); + exceptionList.forEach(exception::addSuppressed); Thread.currentThread().interrupt(); - throw ex; + throw exception; } if (exceptionList.isEmpty()) { - transferService.uploadBlob(prepareMetadata(transferSnapshot), remoteMetadataTransferPath, WritePriority.HIGH); + TransferFileSnapshot tlogMetadata = prepareMetadata(transferSnapshot); + metadataBytesToUpload = tlogMetadata.getContentLength(); + remoteTranslogTransferTracker.addUploadBytesStarted(metadataBytesToUpload); + metadataUploadStartTime = System.nanoTime(); + try { + transferService.uploadBlob(tlogMetadata, remoteMetadataTransferPath, WritePriority.HIGH); + } catch (Exception exception) { + remoteTranslogTransferTracker.addUploadTimeInMillis((System.nanoTime() - metadataUploadStartTime) / 1_000_000L); + remoteTranslogTransferTracker.addUploadBytesFailed(metadataBytesToUpload); + // outer catch handles capturing stats on upload failure + throw new TranslogUploadFailedException("Failed to upload " + tlogMetadata.getName(), exception); + } + + remoteTranslogTransferTracker.addUploadTimeInMillis((System.nanoTime() - metadataUploadStartTime) / 1_000_000L); + remoteTranslogTransferTracker.addUploadBytesSucceeded(metadataBytesToUpload); + captureStatsOnUploadSuccess(prevUploadBytesSucceeded, prevUploadTimeInMillis); translogTransferListener.onUploadComplete(transferSnapshot); return true; } else { - Exception ex = new IOException("Failed to upload " + exceptionList.size() + " files during transfer"); + Exception ex = new TranslogUploadFailedException("Failed to upload " + exceptionList.size() + " files during transfer"); exceptionList.forEach(ex::addSuppressed); throw ex; } } catch (Exception ex) { logger.error(() -> new ParameterizedMessage("Transfer failed for snapshot {}", transferSnapshot), ex); + captureStatsOnUploadFailure(); translogTransferListener.onUploadFailed(transferSnapshot, ex); return false; } } + /** + * Adds relevant stats to the tracker when an upload is started + */ + private void captureStatsBeforeUpload() { + remoteTranslogTransferTracker.incrementTotalUploadsStarted(); + // TODO: Ideally each file's byte uploads started should be when it is actually picked for upload + // https://github.com/opensearch-project/OpenSearch/issues/9729 + remoteTranslogTransferTracker.addUploadBytesStarted(fileTransferTracker.getTotalBytesToUpload()); + } + + /** + * Adds relevant stats to the tracker when an upload is successfully completed + */ + private void captureStatsOnUploadSuccess(long prevUploadBytesSucceeded, long prevUploadTimeInMillis) { + 
remoteTranslogTransferTracker.setLastSuccessfulUploadTimestamp(System.currentTimeMillis()); + remoteTranslogTransferTracker.incrementTotalUploadsSucceeded(); + long totalUploadedBytes = remoteTranslogTransferTracker.getUploadBytesSucceeded() - prevUploadBytesSucceeded; + remoteTranslogTransferTracker.updateUploadBytesMovingAverage(totalUploadedBytes); + long uploadDurationInMillis = remoteTranslogTransferTracker.getTotalUploadTimeInMillis() - prevUploadTimeInMillis; + remoteTranslogTransferTracker.updateUploadTimeMovingAverage(uploadDurationInMillis); + if (uploadDurationInMillis > 0) { + remoteTranslogTransferTracker.updateUploadBytesPerSecMovingAverage((totalUploadedBytes * 1_000L) / uploadDurationInMillis); + } + } + + /** + * Adds relevant stats to the tracker when an upload has failed + */ + private void captureStatsOnUploadFailure() { + remoteTranslogTransferTracker.incrementTotalUploadsFailed(); + } + public boolean downloadTranslog(String primaryTerm, String generation, Path location) throws IOException { - logger.info( + logger.trace( "Downloading translog files with: Primary Term = {}, Generation = {}, Location = {}", primaryTerm, generation, @@ -180,9 +254,21 @@ private void downloadToFS(String fileName, Path location, String primaryTerm) th if (Files.exists(filePath)) { Files.delete(filePath); } + + boolean downloadStatus = false; + long bytesToRead = 0, downloadStartTime = System.nanoTime(); try (InputStream inputStream = transferService.downloadBlob(remoteDataTransferPath.add(primaryTerm), fileName)) { + // Capture number of bytes for stats before reading + bytesToRead = inputStream.available(); Files.copy(inputStream, filePath); + downloadStatus = true; + } finally { + remoteTranslogTransferTracker.addDownloadTimeInMillis((System.nanoTime() - downloadStartTime) / 1_000_000L); + if (downloadStatus) { + remoteTranslogTransferTracker.addDownloadBytesSucceeded(bytesToRead); + } } + // Mark in FileTransferTracker so that the same files are not uploaded at the time of translog sync fileTransferTracker.add(fileName, true); } @@ -194,15 +280,33 @@ public TranslogTransferMetadata readMetadata() throws IOException { LatchedActionListener> latchedActionListener = new LatchedActionListener<>( ActionListener.wrap(blobMetadataList -> { if (blobMetadataList.isEmpty()) return; + RemoteStoreUtils.verifyNoMultipleWriters( + blobMetadataList.stream().map(BlobMetadata::name).collect(Collectors.toList()), + TranslogTransferMetadata::getNodeIdByPrimaryTermAndGen + ); String filename = blobMetadataList.get(0).name(); + boolean downloadStatus = false; + long downloadStartTime = System.nanoTime(), bytesToRead = 0; try (InputStream inputStream = transferService.downloadBlob(remoteMetadataTransferPath, filename)) { + // Capture number of bytes for stats before reading + bytesToRead = inputStream.available(); IndexInput indexInput = new ByteArrayIndexInput("metadata file", inputStream.readAllBytes()); metadataSetOnce.set(metadataStreamWrapper.readStream(indexInput)); + downloadStatus = true; } catch (IOException e) { logger.error(() -> new ParameterizedMessage("Exception while reading metadata file: {}", filename), e); exceptionSetOnce.set(e); + } finally { + remoteTranslogTransferTracker.addDownloadTimeInMillis((System.nanoTime() - downloadStartTime) / 1_000_000L); + logger.debug("translogMetadataDownloadStatus={}", downloadStatus); + if (downloadStatus) { + remoteTranslogTransferTracker.addDownloadBytesSucceeded(bytesToRead); + } } }, e -> { + if (e instanceof RuntimeException) { + throw 
(RuntimeException) e; + } logger.error(() -> new ParameterizedMessage("Exception while listing metadata files"), e); exceptionSetOnce.set((IOException) e); }), @@ -213,7 +317,7 @@ public TranslogTransferMetadata readMetadata() throws IOException { transferService.listAllInSortedOrder( remoteMetadataTransferPath, TranslogTransferMetadata.METADATA_PREFIX, - 1, + METADATA_FILES_TO_FETCH, latchedActionListener ); latch.await(); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java index a8b3404d3f2ce..052206d807fa6 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java @@ -9,6 +9,7 @@ package org.opensearch.index.translog.transfer; import org.opensearch.common.SetOnce; +import org.opensearch.common.collect.Tuple; import org.opensearch.index.remote.RemoteStoreUtils; import java.util.Arrays; @@ -30,7 +31,7 @@ public class TranslogTransferMetadata { private final long minTranslogGeneration; - private int count; + private final int count; private final SetOnce> generationToPrimaryTermMapper = new SetOnce<>(); @@ -46,12 +47,22 @@ public class TranslogTransferMetadata { private final long createdAt; - public TranslogTransferMetadata(long primaryTerm, long generation, long minTranslogGeneration, int count) { + private final String nodeId; + + public TranslogTransferMetadata(long primaryTerm, long generation, long minTranslogGeneration, int count, String nodeId) { this.primaryTerm = primaryTerm; this.generation = generation; this.minTranslogGeneration = minTranslogGeneration; this.count = count; this.createdAt = System.currentTimeMillis(); + this.nodeId = nodeId; + } + + /* + Used only at the time of download. Since details are read from content, nodeId is not available. + */ + public TranslogTransferMetadata(long primaryTerm, long generation, long minTranslogGeneration, int count) { + this(primaryTerm, generation, minTranslogGeneration, count, ""); } public long getPrimaryTerm() { @@ -89,11 +100,33 @@ public String getFileName() { RemoteStoreUtils.invertLong(primaryTerm), RemoteStoreUtils.invertLong(generation), RemoteStoreUtils.invertLong(createdAt), + String.valueOf(Objects.hash(nodeId)), String.valueOf(CURRENT_VERSION) ) ); } + public static Tuple, String> getNodeIdByPrimaryTermAndGeneration(String filename) { + String[] tokens = filename.split(METADATA_SEPARATOR); + if (tokens.length < 6) { + // For versions < 2.11, we don't have node id + return null; + } + return new Tuple<>(new Tuple<>(RemoteStoreUtils.invertLong(tokens[1]), RemoteStoreUtils.invertLong(tokens[2])), tokens[4]); + } + + public static Tuple getNodeIdByPrimaryTermAndGen(String filename) { + String[] tokens = filename.split(METADATA_SEPARATOR); + if (tokens.length < 6) { + // For versions < 2.11, we don't have node id.
+ return null; + } + String primaryTermAndGen = String.join(METADATA_SEPARATOR, tokens[1], tokens[2]); + + String nodeId = tokens[4]; + return new Tuple<>(primaryTermAndGen, nodeId); + } + @Override public int hashCode() { return Objects.hash(primaryTerm, generation); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java new file mode 100644 index 0000000000000..4a9b10ec5a52e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogUploadFailedException.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog.transfer; + +import java.io.IOException; + +/** + * Exception is thrown if there are any exceptions while uploading translog to remote store. + * @opensearch.internal + */ +public class TranslogUploadFailedException extends IOException { + + public TranslogUploadFailedException(String message) { + super(message); + } + + public TranslogUploadFailedException(String message, Throwable cause) { + super(message, cause); + } + +} diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java b/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java index c09fd8798e505..132d1adf916da 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/listener/TranslogTransferListener.java @@ -18,7 +18,6 @@ * @opensearch.internal */ public interface TranslogTransferListener { - /** * Invoked when the transfer of {@link TransferSnapshot} succeeds * @param transferSnapshot the transfer snapshot diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java index c6ae8b988aed0..5c2137ec742a4 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesModule.java +++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java @@ -38,7 +38,6 @@ import org.opensearch.action.admin.indices.rollover.MaxSizeCondition; import org.opensearch.action.resync.TransportResyncReplicationAction; import org.opensearch.common.inject.AbstractModule; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.NamedWriteableRegistry.Entry; @@ -288,9 +287,7 @@ protected void configure() { bind(RetentionLeaseSyncer.class).asEagerSingleton(); bind(SegmentReplicationCheckpointPublisher.class).asEagerSingleton(); bind(SegmentReplicationPressureService.class).asEagerSingleton(); - if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - bind(RemoteStorePressureService.class).asEagerSingleton(); - } + bind(RemoteStorePressureService.class).asEagerSingleton(); } /** diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index d4c2d16279f9e..515958cd69235 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -49,6 +49,7 @@ import 
org.opensearch.action.admin.indices.stats.CommonStatsFlags.Flag; import org.opensearch.action.admin.indices.stats.IndexShardStats; import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.action.search.SearchType; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; @@ -123,7 +124,7 @@ import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.recovery.RecoveryStats; import org.opensearch.index.refresh.RefreshStats; -import org.opensearch.index.remote.RemoteStorePressureService; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.search.stats.SearchStats; import org.opensearch.index.seqno.RetentionLeaseStats; import org.opensearch.index.seqno.RetentionLeaseSyncer; @@ -134,6 +135,7 @@ import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.IndexingOperationListener; import org.opensearch.index.shard.IndexingStats; +import org.opensearch.index.shard.IndexingStats.Stats.DocStatusStats; import org.opensearch.index.store.remote.filecache.FileCacheCleaner; import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; @@ -144,6 +146,7 @@ import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationType; @@ -245,31 +248,66 @@ public class IndicesService extends AbstractLifecycleComponent ); /** - * Used to specify if all indexes are to create with remote store enabled by default + * Used to specify the default translog buffer interval for remote store backed indexes. */ - public static final Setting CLUSTER_REMOTE_STORE_ENABLED_SETTING = Setting.boolSetting( - "cluster.remote_store.enabled", - false, + public static final Setting CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING = Setting.timeSetting( + "cluster.remote_store.translog.buffer_interval", + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + IndexSettings.MINIMUM_REMOTE_TRANSLOG_BUFFER_INTERVAL, Property.NodeScope, - Property.Final + Property.Dynamic + ); + + /** + * This setting is used to set the refresh interval when the {@code index.refresh_interval} index setting is not + * provided during index creation or when the existing {@code index.refresh_interval} index setting is set as null. + * This comes in handy when the user wants to set a default refresh interval across all indexes created in a cluster + * that is different from 1s and at the same time have the searchIdle feature supported. The setting can only be + * as low as the {@code cluster.minimum.index.refresh_interval}. + */ + public static final Setting CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting( + "cluster.default.index.refresh_interval", + IndexSettings.DEFAULT_REFRESH_INTERVAL, + IndexSettings.MINIMUM_REFRESH_INTERVAL, + new ClusterDefaultRefreshIntervalValidator(), + Property.NodeScope, + Property.Dynamic + ); + + /** + * This setting is used to set the minimum refresh interval applicable for all indexes in a cluster.
The + * {@code cluster.default.index.refresh_interval} setting value needs to be higher than this setting's value. Index + * creation will fail if the index setting {@code index.refresh_interval} is supplied with a value lower than the + * cluster minimum refresh interval. + */ + public static final Setting CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING = Setting.timeSetting( + "cluster.minimum.index.refresh_interval", + IndexSettings.MINIMUM_REFRESH_INTERVAL, + IndexSettings.MINIMUM_REFRESH_INTERVAL, + new ClusterMinimumRefreshIntervalValidator(), + Property.NodeScope, + Property.Dynamic ); /** - * Used to specify default repo to use for segment upload for remote store backed indices + * This setting is used to restrict creation or updation of index where the `index.translog.durability` index setting + * is set as ASYNC if enabled. If disabled, any of the durability mode can be used and switched at any later time from + * one to another. */ - public static final Setting CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING = Setting.simpleString( - "cluster.remote_store.segment.repository", - "", + public static final Setting CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING = Setting.boolSetting( + "cluster.remote_store.index.restrict.async-durability", + false, Property.NodeScope, Property.Final ); /** - * Used to specify default repo to use for translog upload for remote store backed indices + * This setting is used to restrict creation of index where the 'index.replication.type' index setting is set. + * If disabled, the replication type can be specified. */ - public static final Setting CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING = Setting.simpleString( - "cluster.remote_store.translog.repository", - "", + public static final Setting CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING = Setting.boolSetting( + "cluster.restrict.index.replication_type", + false, Property.NodeScope, Property.Final ); @@ -311,6 +349,7 @@ public class IndicesService extends AbstractLifecycleComponent private final CountDownLatch closeLatch = new CountDownLatch(1); private volatile boolean idFieldDataEnabled; private volatile boolean allowExpensiveQueries; + private final RecoverySettings recoverySettings; @Nullable private final OpenSearchThreadPoolExecutor danglingIndicesThreadPoolExecutor; @@ -319,9 +358,12 @@ public class IndicesService extends AbstractLifecycleComponent private final ValuesSourceRegistry valuesSourceRegistry; private final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory; private final BiFunction translogFactorySupplier; - + private volatile TimeValue clusterDefaultRefreshInterval; + private volatile TimeValue clusterRemoteTranslogBufferInterval; private final FileCacheCleaner fileCacheCleaner; + private final SearchRequestStats searchRequestStats; + @Override protected void doStart() { // Start thread that will manage cleaning the field data cache periodically @@ -351,7 +393,10 @@ public IndicesService( Map recoveryStateFactories, IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, Supplier repositoriesServiceSupplier, - FileCacheCleaner fileCacheCleaner + FileCacheCleaner fileCacheCleaner, + SearchRequestStats searchRequestStats, + @Nullable RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + RecoverySettings recoverySettings ) { this.settings = settings; this.threadPool = threadPool; @@ -440,19 +485,45 @@ protected void closeInternal() { this.allowExpensiveQueries = ALLOW_EXPENSIVE_QUERIES.get(clusterService.getSettings()); 
clusterService.getClusterSettings().addSettingsUpdateConsumer(ALLOW_EXPENSIVE_QUERIES, this::setAllowExpensiveQueries); this.remoteDirectoryFactory = remoteDirectoryFactory; - this.translogFactorySupplier = getTranslogFactorySupplier(repositoriesServiceSupplier, threadPool); + this.translogFactorySupplier = getTranslogFactorySupplier(repositoriesServiceSupplier, threadPool, remoteStoreStatsTrackerFactory); + this.searchRequestStats = searchRequestStats; + this.clusterDefaultRefreshInterval = CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.get(clusterService.getSettings()); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING, this::onRefreshIntervalUpdate); + this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(clusterService.getSettings()); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setClusterRemoteTranslogBufferInterval); + this.recoverySettings = recoverySettings; + } + + /** + * Applies any change to the dynamic cluster setting {@code cluster.default.index.refresh_interval}. This + * method gets called whenever the setting changes. We set the instance variable with the updated value as this is + * also a supplier to all IndexService instances that have been created on the node. We also notify the change to all + * IndexService instances that are created on this node. + * + * @param clusterDefaultRefreshInterval the updated cluster default refresh interval. + */ + private void onRefreshIntervalUpdate(TimeValue clusterDefaultRefreshInterval) { + this.clusterDefaultRefreshInterval = clusterDefaultRefreshInterval; + for (Map.Entry entry : indices.entrySet()) { + IndexService indexService = entry.getValue(); + indexService.onRefreshIntervalChange(); + } } private static BiFunction getTranslogFactorySupplier( Supplier repositoriesServiceSupplier, - ThreadPool threadPool + ThreadPool threadPool, + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) { return (indexSettings, shardRouting) -> { if (indexSettings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { return new RemoteBlobStoreInternalTranslogFactory( repositoriesServiceSupplier, threadPool, - indexSettings.getRemoteStoreTranslogRepository() + indexSettings.getRemoteStoreTranslogRepository(), + remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardRouting.shardId()) ); } return new InternalTranslogFactory(); @@ -540,7 +611,7 @@ public NodeIndicesStats stats(CommonStatsFlags flags) { } } - return new NodeIndicesStats(commonStats, statsByShard(this, flags)); + return new NodeIndicesStats(commonStats, statsByShard(this, flags), searchRequestStats); } Map> statsByShard(final IndicesService indicesService, final CommonStatsFlags flags) { @@ -819,7 +890,10 @@ private synchronized IndexService createIndexService( this::isIdFieldDataEnabled, valuesSourceRegistry, remoteDirectoryFactory, - translogFactorySupplier + translogFactorySupplier, + this::getClusterDefaultRefreshInterval, + this::getClusterRemoteTranslogBufferInterval, + this.recoverySettings ); } @@ -866,7 +940,7 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { /** * creates a new mapper service for the given index, in order to do administrative work like mapping updates. * This *should not* be used for document parsing. Doing so will result in an exception. - * + * <p>
              * Note: the returned {@link MapperService} should be closed when unneeded. */ public synchronized MapperService createIndexMapperService(IndexMetadata indexMetadata) throws IOException { @@ -930,7 +1004,7 @@ public IndexShard createShard( final RetentionLeaseSyncer retentionLeaseSyncer, final DiscoveryNode targetNode, final DiscoveryNode sourceNode, - final RemoteStorePressureService remoteStorePressureService + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); ensureChangesAllowed(); @@ -942,7 +1016,7 @@ public IndexShard createShard( globalCheckpointSyncer, retentionLeaseSyncer, checkpointPublisher, - remoteStorePressureService + remoteStoreStatsTrackerFactory ); indexShard.addShardFailureCallback(onShardFailure); indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService, mapping -> { @@ -1004,6 +1078,15 @@ public IndicesQueryCache getIndicesQueryCache() { return indicesQueryCache; } + /** + * Accumulate stats from the passed Object + * + * @param stats Instance storing {@link DocStatusStats} + */ + public void addDocStatusStats(final DocStatusStats stats) { + oldShardsStats.indexingStats.getTotal().getDocStatusStats().add(stats); + } + /** * Statistics for old shards * @@ -1069,7 +1152,7 @@ public void deleteUnassignedIndex(String reason, IndexMetadata metadata, Cluster /** * Deletes the index store trying to acquire all shards locks for this index. * This method will delete the metadata for the index even if the actual shards can't be locked. - * + *
<p>
* Package private for testing */ void deleteIndexStore(String reason, IndexMetadata metadata) throws IOException { @@ -1150,7 +1233,7 @@ public void deleteShardStore(String reason, ShardLock lock, IndexSettings indexS * This method deletes the shard contents on disk for the given shard ID. This method will fail if the shard deleting * is prevented by {@link #canDeleteShardContent(ShardId, IndexSettings)} * or if the shards lock can not be acquired. - * + * <p>
              * On data nodes, if the deleted shard is the last shard folder in its index, the method will attempt to remove * the index folder as well. * @@ -1861,4 +1944,85 @@ public boolean allPendingDanglingIndicesWritten() { return nodeWriteDanglingIndicesInfo == false || (danglingIndicesToWrite.isEmpty() && danglingIndicesThreadPoolExecutor.getActiveCount() == 0); } + + /** + * Validates the cluster default index refresh interval. + * + * @opensearch.internal + */ + private static final class ClusterDefaultRefreshIntervalValidator implements Setting.Validator { + + @Override + public void validate(TimeValue value) { + + } + + @Override + public void validate(final TimeValue defaultRefreshInterval, final Map, Object> settings) { + final TimeValue minimumRefreshInterval = (TimeValue) settings.get(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING); + validateRefreshIntervalSettings(minimumRefreshInterval, defaultRefreshInterval); + } + + @Override + public Iterator> settings() { + final List> settings = List.of(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING); + return settings.iterator(); + } + } + + /** + * Validates the cluster minimum index refresh interval. + * + * @opensearch.internal + */ + private static final class ClusterMinimumRefreshIntervalValidator implements Setting.Validator { + + @Override + public void validate(TimeValue value) { + + } + + @Override + public void validate(final TimeValue minimumRefreshInterval, final Map, Object> settings) { + final TimeValue defaultRefreshInterval = (TimeValue) settings.get(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING); + validateRefreshIntervalSettings(minimumRefreshInterval, defaultRefreshInterval); + } + + @Override + public Iterator> settings() { + final List> settings = List.of(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING); + return settings.iterator(); + } + } + + /** + * Validates that the cluster minimum refresh interval is not more than the cluster default refresh interval. + * + * @param minimumRefreshInterval value of cluster minimum index refresh interval setting + * @param defaultRefreshInterval value of cluster default index refresh interval setting + */ + private static void validateRefreshIntervalSettings(TimeValue minimumRefreshInterval, TimeValue defaultRefreshInterval) { + if (minimumRefreshInterval.compareTo(defaultRefreshInterval) > 0) { + throw new IllegalArgumentException( + "cluster minimum index refresh interval [" + + minimumRefreshInterval + + "] more than cluster default index refresh interval [" + + defaultRefreshInterval + + "]" + ); + } + } + + private TimeValue getClusterDefaultRefreshInterval() { + return this.clusterDefaultRefreshInterval; + } + + // Exclusively for testing, please do not use it elsewhere. 
+ public TimeValue getClusterRemoteTranslogBufferInterval() { + return clusterRemoteTranslogBufferInterval; + } + + private void setClusterRemoteTranslogBufferInterval(TimeValue clusterRemoteTranslogBufferInterval) { + this.clusterRemoteTranslogBufferInterval = clusterRemoteTranslogBufferInterval; + } } diff --git a/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java b/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java index cc3d8193dfa6b..8a7aaba2726f4 100644 --- a/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java +++ b/server/src/main/java/org/opensearch/indices/NodeIndicesStats.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.action.admin.indices.stats.IndexShardStats; import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.common.Nullable; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -71,7 +72,6 @@ * @opensearch.internal */ public class NodeIndicesStats implements Writeable, ToXContentFragment { - private CommonStats stats; private Map> statsByShard; @@ -92,7 +92,7 @@ public NodeIndicesStats(StreamInput in) throws IOException { } } - public NodeIndicesStats(CommonStats oldStats, Map> statsByShard) { + public NodeIndicesStats(CommonStats oldStats, Map> statsByShard, SearchRequestStats searchRequestStats) { // this.stats = stats; this.statsByShard = statsByShard; @@ -105,6 +105,9 @@ public NodeIndicesStats(CommonStats oldStats, Map> } } } + if (this.stats.search != null) { + this.stats.search.setSearchRequestStats(searchRequestStats); + } } @Nullable diff --git a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java index d9a64781c3f46..e345b613eebbd 100644 --- a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java +++ b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java @@ -54,7 +54,7 @@ /** * This class contains the logic used to check the cluster-wide shard limit before shards are created and ensuring that the limit is * updated correctly on setting updates, etc. - * + *
<p>
              * NOTE: This is the limit applied at *shard creation time*. If you are looking for the limit applied at *allocation* time, which is * controlled by a different setting, * see {@link org.opensearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider}. diff --git a/server/src/main/java/org/opensearch/indices/analysis/PreBuiltCacheFactory.java b/server/src/main/java/org/opensearch/indices/analysis/PreBuiltCacheFactory.java index 601bd79a24746..13cc78b620b9a 100644 --- a/server/src/main/java/org/opensearch/indices/analysis/PreBuiltCacheFactory.java +++ b/server/src/main/java/org/opensearch/indices/analysis/PreBuiltCacheFactory.java @@ -48,11 +48,11 @@ public class PreBuiltCacheFactory { /** * The strategy of caching the analyzer - * - * ONE Exactly one version is stored. Useful for analyzers which do not store version information - * LUCENE Exactly one version for each lucene version is stored. Useful to prevent different analyzers with the same version - * OPENSEARCH Exactly one version per opensearch version is stored. Useful if you change an analyzer between opensearch - * releases, when the lucene version does not change + *
<ul> + * <li>ONE : Exactly one version is stored. Useful for analyzers which do not store version information</li> + * <li>LUCENE : Exactly one version for each lucene version is stored. Useful to prevent different analyzers with the same version</li> + * <li>OPENSEARCH : Exactly one version per opensearch version is stored. Useful if you change an analyzer between opensearch releases, when the lucene version does not change</li> + * </ul>
              */ public enum CachingStrategy { ONE, diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index f94098ddb7c88..8a666c6b260b1 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -55,7 +55,6 @@ import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.action.ActionListener; @@ -66,7 +65,7 @@ import org.opensearch.index.IndexComponent; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.remote.RemoteStorePressureService; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.seqno.GlobalCheckpointSyncAction; import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLeaseSyncer; @@ -149,7 +148,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple private final SegmentReplicationCheckpointPublisher checkpointPublisher; - private final RemoteStorePressureService remoteStorePressureService; + private final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; @Inject public IndicesClusterStateService( @@ -170,7 +169,7 @@ public IndicesClusterStateService( final GlobalCheckpointSyncAction globalCheckpointSyncAction, final RetentionLeaseSyncer retentionLeaseSyncer, final SegmentReplicationCheckpointPublisher checkpointPublisher, - final RemoteStorePressureService remoteStorePressureService + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) { this( settings, @@ -190,7 +189,7 @@ public IndicesClusterStateService( primaryReplicaSyncer, globalCheckpointSyncAction::updateGlobalCheckpointForShard, retentionLeaseSyncer, - remoteStorePressureService + remoteStoreStatsTrackerFactory ); } @@ -213,7 +212,7 @@ public IndicesClusterStateService( final PrimaryReplicaSyncer primaryReplicaSyncer, final Consumer globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, - final RemoteStorePressureService remoteStorePressureService + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) { this.settings = settings; this.checkpointPublisher = checkpointPublisher; @@ -223,10 +222,7 @@ public IndicesClusterStateService( ); indexEventListeners.add(segmentReplicationTargetService); indexEventListeners.add(segmentReplicationSourceService); - // if remote store feature is not enabled, do not wire the remote upload pressure service as an IndexEventListener. 
- if (FeatureFlags.isEnabled(FeatureFlags.REMOTE_STORE)) { - indexEventListeners.add(remoteStorePressureService); - } + indexEventListeners.add(remoteStoreStatsTrackerFactory); this.segmentReplicationTargetService = segmentReplicationTargetService; this.builtInIndexListener = Collections.unmodifiableList(indexEventListeners); this.indicesService = indicesService; @@ -240,7 +236,7 @@ public IndicesClusterStateService( this.globalCheckpointSyncer = globalCheckpointSyncer; this.retentionLeaseSyncer = Objects.requireNonNull(retentionLeaseSyncer); this.sendRefreshMapping = settings.getAsBoolean("indices.cluster.send_refresh_mapping", true); - this.remoteStorePressureService = remoteStorePressureService; + this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory; } @Override @@ -683,7 +679,7 @@ private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardR retentionLeaseSyncer, nodes.getLocalNode(), sourceNode, - remoteStorePressureService + remoteStoreStatsTrackerFactory ); } catch (Exception e) { failAndRemoveShard(shardRouting, true, "failed to create shard", e, state); @@ -1028,6 +1024,7 @@ U createIndex(IndexMetadata indexMetadata, List builtInIndex * @param retentionLeaseSyncer a callback when this shard syncs retention leases * @param targetNode the node where this shard will be recovered * @param sourceNode the source node to recover this shard from (it might be null) + * @param remoteStoreStatsTrackerFactory factory for remote store stats trackers * @return a new shard * @throws IOException if an I/O exception occurs when creating the shard */ @@ -1042,7 +1039,7 @@ T createShard( RetentionLeaseSyncer retentionLeaseSyncer, DiscoveryNode targetNode, @Nullable DiscoveryNode sourceNode, - RemoteStorePressureService remoteStorePressureService + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) throws IOException; /** diff --git a/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java index a915046c381fc..ac6b2e6b77d18 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java @@ -161,6 +161,7 @@ && isTargetSameHistory() }, shardId + " removing retention lease for [" + request.targetAllocationId() + "]", shard, cancellableThreads, logger); deleteRetentionLeaseStep.whenComplete(ignored -> { + logger.debug("deleteRetentionLeaseStep completed"); assert Transports.assertNotTransportThread(this + "[phase1]"); phase1(wrappedSafeCommit.get(), startingSeqNo, () -> estimateNumOps, sendFileStep, false); }, onFailure); @@ -172,12 +173,14 @@ && isTargetSameHistory() assert startingSeqNo >= 0 : "startingSeqNo must be non negative. got: " + startingSeqNo; sendFileStep.whenComplete(r -> { + logger.debug("sendFileStep completed"); assert Transports.assertNotTransportThread(this + "[prepareTargetForTranslog]"); // For a sequence based recovery, the target can keep its local translog prepareTargetForTranslog(countNumberOfHistoryOperations(startingSeqNo), prepareEngineStep); }, onFailure); prepareEngineStep.whenComplete(prepareEngineTime -> { + logger.debug("prepareEngineStep completed"); assert Transports.assertNotTransportThread(this + "[phase2]"); /* * add shard to replication group (shard will receive replication requests from this point on) now that engine is open. 
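Both recovery source handlers above now emit a debug breadcrumb inside each StepListener callback before chaining the next phase, so a stalled recovery can be localized to a specific phase from the logs. A minimal sketch of that chaining pattern follows; the class and phase methods are invented for illustration, and only StepListener and its whenComplete signature come from the codebase:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.opensearch.action.StepListener;

    import java.util.function.Consumer;

    // Hypothetical sketch, not code from this change: the step-chaining style used by the
    // recovery source handlers, with the per-step debug breadcrumbs this diff adds.
    class StepChainingSketch {
        private static final Logger logger = LogManager.getLogger(StepChainingSketch.class);

        void recover(Consumer<Exception> onFailure) {
            final StepListener<Void> sendFileStep = new StepListener<>();
            final StepListener<Void> prepareEngineStep = new StepListener<>();

            sendFiles(sendFileStep); // hypothetical async phase 1

            sendFileStep.whenComplete(r -> {
                logger.debug("sendFileStep completed");  // breadcrumb, as added throughout this diff
                prepareEngine(prepareEngineStep);        // phase 2 starts only after phase 1 succeeds
            }, onFailure);

            prepareEngineStep.whenComplete(r -> logger.debug("prepareEngineStep completed"), onFailure);
        }

        private void sendFiles(StepListener<Void> step) { step.onResponse(null); }     // stub phase
        private void prepareEngine(StepListener<Void> step) { step.onResponse(null); } // stub phase
    }

The breadcrumbs are cheap when debug logging is off, which is presumably why they can be added to every step.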
diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java index 6c7632a8a408d..cb2bedf00de99 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java @@ -376,7 +376,8 @@ private Tuple createRecovery transportService, request.targetNode(), recoverySettings, - throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime) + throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime), + shard.isRemoteTranslogEnabled() ); handler = RecoverySourceHandlerFactory.create(shard, recoveryTarget, request, recoverySettings); return Tuple.tuple(handler, recoveryTarget); diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index c76637b59ca59..e59766d0274f1 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -246,7 +246,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi indexShard.prepareForIndexRecovery(); final boolean hasRemoteSegmentStore = indexShard.indexSettings().isRemoteStoreEnabled(); if (hasRemoteSegmentStore) { - indexShard.syncSegmentsFromRemoteSegmentStore(false, true); + indexShard.syncSegmentsFromRemoteSegmentStore(false, recoveryTarget::setLastAccessTime); } final boolean hasRemoteTranslog = recoveryTarget.state().getPrimary() == false && indexShard.isRemoteTranslogEnabled(); final boolean hasNoTranslog = indexShard.indexSettings().isRemoteSnapshot(); @@ -265,7 +265,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi actionName = PeerRecoverySourceService.Actions.START_RECOVERY; } catch (final Exception e) { // this will be logged as warning later on... 
- logger.trace("unexpected error while preparing shard for peer recovery, failing recovery", e); + logger.debug("unexpected error while preparing shard for peer recovery, failing recovery", e); onGoingRecoveries.fail( recoveryId, new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e), @@ -273,12 +273,12 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi ); return; } - logger.trace("{} starting recovery from {}", startRequest.shardId(), startRequest.sourceNode()); + logger.debug("{} starting recovery from {}", startRequest.shardId(), startRequest.sourceNode()); } else { startRequest = preExistingRequest; requestToSend = new ReestablishRecoveryRequest(recoveryId, startRequest.shardId(), startRequest.targetAllocationId()); actionName = PeerRecoverySourceService.Actions.REESTABLISH_RECOVERY; - logger.trace("{} reestablishing recovery from {}", startRequest.shardId(), startRequest.sourceNode()); + logger.debug("{} reestablishing recovery from {}", startRequest.shardId(), startRequest.sourceNode()); } } transportService.sendRequest( diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java index e2346ae078339..0f3025369833d 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java @@ -41,6 +41,7 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; @@ -84,6 +85,17 @@ public class RecoverySettings { Property.NodeScope ); + /** + * Controls the maximum number of streams that can be started concurrently per recovery when downloading from the remote store. + */ + public static final Setting INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING = new Setting<>( + "indices.recovery.max_concurrent_remote_store_streams", + (s) -> Integer.toString(Math.max(1, OpenSearchExecutors.allocatedProcessors(s) / 2)), + (s) -> Setting.parseInt(s, 1, "indices.recovery.max_concurrent_remote_store_streams"), + Property.Dynamic, + Property.NodeScope + ); + /** * how long to wait before retrying after issues cause by cluster state syncing between nodes * i.e., local node is not yet known on remote node, remote shard not yet started etc. 
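The new indices.recovery.max_concurrent_remote_store_streams setting above follows the standard dynamic node-setting pattern: a processor-derived default plus an update consumer registered with ClusterSettings so the live value tracks cluster setting updates. A condensed sketch of that wiring is below; the holder class is invented, while the Setting definition mirrors the hunks above:

    import org.opensearch.common.settings.ClusterSettings;
    import org.opensearch.common.settings.Setting;
    import org.opensearch.common.settings.Setting.Property;
    import org.opensearch.common.settings.Settings;
    import org.opensearch.common.util.concurrent.OpenSearchExecutors;

    // Condensed sketch of the new dynamic setting wiring (class name is hypothetical).
    class RemoteStoreStreamsSketch {
        static final Setting<Integer> MAX_CONCURRENT_REMOTE_STORE_STREAMS = new Setting<>(
            "indices.recovery.max_concurrent_remote_store_streams",
            s -> Integer.toString(Math.max(1, OpenSearchExecutors.allocatedProcessors(s) / 2)), // default: half the allocated processors, at least 1
            s -> Setting.parseInt(s, 1, "indices.recovery.max_concurrent_remote_store_streams"),
            Property.Dynamic,   // updatable at runtime
            Property.NodeScope
        );

        private volatile int maxConcurrentRemoteStoreStreams;

        RemoteStoreStreamsSketch(Settings settings, ClusterSettings clusterSettings) {
            this.maxConcurrentRemoteStoreStreams = MAX_CONCURRENT_REMOTE_STORE_STREAMS.get(settings);
            // keep the volatile field in sync with dynamic updates, as RecoverySettings does above
            clusterSettings.addSettingsUpdateConsumer(MAX_CONCURRENT_REMOTE_STORE_STREAMS, v -> this.maxConcurrentRemoteStoreStreams = v);
        }

        int getMaxConcurrentRemoteStoreStreams() {
            return maxConcurrentRemoteStoreStreams;
        }
    }

Because the setting is dynamic, an operator can raise or lower the per-recovery stream count through the cluster settings API without a node restart.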
@@ -149,6 +161,7 @@ public class RecoverySettings { private volatile ByteSizeValue maxBytesPerSec; private volatile int maxConcurrentFileChunks; private volatile int maxConcurrentOperations; + private volatile int maxConcurrentRemoteStoreStreams; private volatile SimpleRateLimiter rateLimiter; private volatile TimeValue retryDelayStateSync; private volatile TimeValue retryDelayNetwork; @@ -163,6 +176,7 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { this.retryDelayStateSync = INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.get(settings); this.maxConcurrentFileChunks = INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING.get(settings); this.maxConcurrentOperations = INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING.get(settings); + this.maxConcurrentRemoteStoreStreams = INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING.get(settings); // doesn't have to be fast as nodes are reconnected every 10s by default (see InternalClusterService.ReconnectToNodes) // and we want to give the cluster-manager time to remove a faulty node this.retryDelayNetwork = INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.get(settings); @@ -184,6 +198,10 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, this::setMaxBytesPerSec); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_CONCURRENT_FILE_CHUNKS_SETTING, this::setMaxConcurrentFileChunks); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING, this::setMaxConcurrentOperations); + clusterSettings.addSettingsUpdateConsumer( + INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING, + this::setMaxConcurrentRemoteStoreStreams + ); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, this::setRetryDelayStateSync); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, this::setRetryDelayNetwork); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, this::setInternalActionTimeout); @@ -279,4 +297,12 @@ public int getMaxConcurrentOperations() { private void setMaxConcurrentOperations(int maxConcurrentOperations) { this.maxConcurrentOperations = maxConcurrentOperations; } + + public int getMaxConcurrentRemoteStoreStreams() { + return this.maxConcurrentRemoteStoreStreams; + } + + private void setMaxConcurrentRemoteStoreStreams(int maxConcurrentRemoteStoreStreams) { + this.maxConcurrentRemoteStoreStreams = maxConcurrentRemoteStoreStreams; + } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java index 349ceb51d8173..7d7e2f6114129 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java @@ -202,9 +202,13 @@ protected void finalizeStepAndCompleteFuture( final StepListener finalizeStep = new StepListener<>(); // Recovery target can trim all operations >= startingSeqNo as we have sent all these operations in the phase 2 final long trimAboveSeqNo = startingSeqNo - 1; - sendSnapshotStep.whenComplete(r -> finalizeRecovery(r.targetLocalCheckpoint, trimAboveSeqNo, finalizeStep), onFailure); + sendSnapshotStep.whenComplete(r -> { + logger.debug("sendSnapshotStep completed"); + finalizeRecovery(r.targetLocalCheckpoint, 
trimAboveSeqNo, finalizeStep); + }, onFailure); finalizeStep.whenComplete(r -> { + logger.debug("finalizeStep completed"); final long phase1ThrottlingWaitTime = 0L; // TODO: return the actual throttle time final SendSnapshotResult sendSnapshotResult = sendSnapshotStep.result(); final SendFileResult sendFileResult = sendFileStep.result(); @@ -234,7 +238,10 @@ protected void onSendFileStepComplete( GatedCloseable wrappedSafeCommit, Releasable releaseStore ) { - sendFileStep.whenComplete(r -> IOUtils.close(wrappedSafeCommit, releaseStore), e -> { + sendFileStep.whenComplete(r -> { + logger.debug("sendFileStep completed"); + IOUtils.close(wrappedSafeCommit, releaseStore); + }, e -> { try { IOUtils.close(wrappedSafeCommit, releaseStore); } catch (final IOException ex) { @@ -446,16 +453,22 @@ void phase1( sendFileInfoStep ); - sendFileInfoStep.whenComplete( - r -> sendFiles(store, phase1Files.toArray(new StoreFileMetadata[0]), translogOps, sendFilesStep), - listener::onFailure - ); + sendFileInfoStep.whenComplete(r -> { + logger.debug("sendFileInfoStep completed"); + sendFiles(store, phase1Files.toArray(new StoreFileMetadata[0]), translogOps, sendFilesStep); + }, listener::onFailure); // When doing peer recovery of remote store enabled replica, retention leases are not required. if (skipCreateRetentionLeaseStep) { - sendFilesStep.whenComplete(r -> createRetentionLeaseStep.onResponse(null), listener::onFailure); + sendFilesStep.whenComplete(r -> { + logger.debug("sendFilesStep completed"); + createRetentionLeaseStep.onResponse(null); + }, listener::onFailure); } else { - sendFilesStep.whenComplete(r -> createRetentionLease(startingSeqNo, createRetentionLeaseStep), listener::onFailure); + sendFilesStep.whenComplete(r -> { + logger.debug("sendFilesStep completed"); + createRetentionLease(startingSeqNo, createRetentionLeaseStep); + }, listener::onFailure); } createRetentionLeaseStep.whenComplete(retentionLease -> { @@ -472,6 +485,7 @@ void phase1( final long totalSize = totalSizeInBytes; final long existingTotalSize = existingTotalSizeInBytes; cleanFilesStep.whenComplete(r -> { + logger.debug("cleanFilesStep completed"); final TimeValue took = stopWatch.totalTime(); logger.trace("recovery [phase1]: took [{}]", took); listener.onResponse( @@ -542,7 +556,10 @@ void createRetentionLease(final long startingSeqNo, ActionListener(logger, shard.getThreadPool(), ThreadPool.Names.GENERIC, cloneRetentionLeaseStep, false) ); logger.trace("cloned primary's retention lease as [{}]", clonedLease); - cloneRetentionLeaseStep.whenComplete(rr -> listener.onResponse(clonedLease), listener::onFailure); + cloneRetentionLeaseStep.whenComplete(rr -> { + logger.debug("cloneRetentionLeaseStep completed"); + listener.onResponse(clonedLease); + }, listener::onFailure); } catch (RetentionLeaseNotFoundException e) { // it's possible that the primary has no retention lease yet if we are doing a rolling upgrade from a version before // 7.4, and in that case we just create a lease using the local checkpoint of the safe commit which we're using for @@ -556,7 +573,10 @@ void createRetentionLease(final long startingSeqNo, ActionListener(logger, shard.getThreadPool(), ThreadPool.Names.GENERIC, addRetentionLeaseStep, false) ); - addRetentionLeaseStep.whenComplete(rr -> listener.onResponse(newLease), listener::onFailure); + addRetentionLeaseStep.whenComplete(rr -> { + logger.debug("addRetentionLeaseStep completed"); + listener.onResponse(newLease); + }, listener::onFailure); logger.trace("created retention lease with estimated 
checkpoint of [{}]", estimatedGlobalCheckpoint); } }, shardId + " establishing retention lease for [" + request.targetAllocationId() + "]", shard, cancellableThreads, logger); @@ -812,6 +832,7 @@ void finalizeRecovery(long targetLocalCheckpoint, long trimAboveSeqNo, ActionLis cancellableThreads.checkForCancel(); recoveryTarget.finalizeRecovery(globalCheckpoint, trimAboveSeqNo, finalizeListener); finalizeListener.whenComplete(r -> { + logger.debug("finalizeListenerStep completed"); RunUnderPrimaryPermit.run( () -> shard.updateGlobalCheckpointForShard(request.targetAllocationId(), globalCheckpoint), shardId + " updating " + request.targetAllocationId() + "'s global checkpoint", diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTargetHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTargetHandler.java index ac28dabf815a5..707e41c8c27e1 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTargetHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTargetHandler.java @@ -55,7 +55,7 @@ public interface RecoveryTargetHandler extends FileChunkWriter { /** * Used with Segment replication only - * + *
<p>
              * This function is used to force a sync target primary node with source (old primary). This is to avoid segment files * conflict with replicas when target is promoted as primary. */ diff --git a/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java index 36beabc4a9026..37227596fdfe7 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -75,6 +75,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { private final AtomicLong requestSeqNoGenerator = new AtomicLong(0); private final RetryableTransportClient retryableTransportClient; private final RemoteSegmentFileChunkWriter fileChunkWriter; + private final boolean remoteStoreEnabled; public RemoteRecoveryTargetHandler( long recoveryId, @@ -82,7 +83,8 @@ public RemoteRecoveryTargetHandler( TransportService transportService, DiscoveryNode targetNode, RecoverySettings recoverySettings, - Consumer onSourceThrottle + Consumer onSourceThrottle, + boolean remoteStoreEnabled ) { this.transportService = transportService; // It is safe to pass the retry timeout value here because RemoteRecoveryTargetHandler @@ -111,6 +113,7 @@ public RemoteRecoveryTargetHandler( requestSeqNoGenerator, onSourceThrottle ); + this.remoteStoreEnabled = remoteStoreEnabled; } public DiscoveryNode targetNode() { @@ -129,7 +132,13 @@ public void prepareForTranslogOperations(int totalTranslogOps, ActionListener reader = in -> TransportResponse.Empty.INSTANCE; final ActionListener responseListener = ActionListener.map(listener, r -> null); - retryableTransportClient.executeRetryableAction(action, request, responseListener, reader); + if (remoteStoreEnabled) { + // If remote store is enabled, during the prepare_translog phase, translog is also downloaded on the + // target host along with incremental segments download. + retryableTransportClient.executeRetryableAction(action, request, translogOpsRequestOptions, responseListener, reader); + } else { + retryableTransportClient.executeRetryableAction(action, request, responseListener, reader); + } } @Override @@ -189,7 +198,7 @@ public void indexTranslogOperations( /** * Used with Segment replication only - * + *
<p>
              * This function is used to force a sync target primary node with source (old primary). This is to avoid segment files * conflict with replicas when target is promoted as primary. */ diff --git a/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java index 5d6c0eb3bae05..66c7a3b48f28f 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java @@ -80,12 +80,14 @@ protected void innerRecoveryToTarget(ActionListener listener, assert startingSeqNo >= 0 : "startingSeqNo must be non negative. got: " + startingSeqNo; sendFileStep.whenComplete(r -> { + logger.debug("sendFileStep completed"); assert Transports.assertNotTransportThread(this + "[prepareTargetForTranslog]"); // For a sequence based recovery, the target can keep its local translog prepareTargetForTranslog(0, prepareEngineStep); }, onFailure); prepareEngineStep.whenComplete(prepareEngineTime -> { + logger.debug("prepareEngineStep completed"); assert Transports.assertNotTransportThread(this + "[phase2]"); RunUnderPrimaryPermit.run( () -> shard.initiateTracking(request.targetAllocationId()), diff --git a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java index 268894b1c0af3..33967c0203516 100644 --- a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java +++ b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java @@ -61,8 +61,8 @@ class OngoingSegmentReplications { this.allocationIdToHandlers = ConcurrentCollections.newConcurrentMap(); } - /** - * Operations on the {@link #copyStateMap} member. + /* + Operations on the {@link #copyStateMap} member. */ /** @@ -85,12 +85,12 @@ synchronized CopyState getCachedCopyState(ReplicationCheckpoint checkpoint) thro // build the CopyState object and cache it before returning final CopyState copyState = new CopyState(checkpoint, indexShard); - /** - * Use the checkpoint from the request as the key in the map, rather than - * the checkpoint from the created CopyState. This maximizes cache hits - * if replication targets make a request with an older checkpoint. - * Replication targets are expected to fetch the checkpoint in the response - * CopyState to bring themselves up to date. + /* + Use the checkpoint from the request as the key in the map, rather than + the checkpoint from the created CopyState. This maximizes cache hits + if replication targets make a request with an older checkpoint. + Replication targets are expected to fetch the checkpoint in the response + CopyState to bring themselves up to date. 
*/ addToCopyStateMap(checkpoint, copyState); return copyState; diff --git a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java index 9dcd16c53e6f3..02fc8feefd698 100644 --- a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java @@ -22,6 +22,7 @@ import org.opensearch.transport.TransportService; import java.util.List; +import java.util.function.BiConsumer; import static org.opensearch.indices.replication.SegmentReplicationSourceService.Actions.GET_CHECKPOINT_INFO; import static org.opensearch.indices.replication.SegmentReplicationSourceService.Actions.GET_SEGMENT_FILES; @@ -80,8 +81,13 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { + // fileProgressTracker is a no-op for node to node recovery + // MultiFileWriter takes care of progress tracking for downloads in this scenario + // TODO: Move state management and tracking into replication methods and use chunking and data + // copy mechanisms only from MultiFileWriter final Writeable.Reader reader = GetSegmentFilesResponse::new; final ActionListener responseListener = ActionListener.map(listener, r -> r); final GetSegmentFilesRequest request = new GetSegmentFilesRequest( diff --git a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java index 7252fea044a02..b06b3e0497cf7 100644 --- a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java @@ -13,9 +13,9 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; -import org.apache.lucene.store.IOContext; import org.apache.lucene.util.Version; import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.util.CancellableThreads; import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; @@ -25,11 +25,15 @@ import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; import java.util.stream.Collectors; /** @@ -43,6 +47,7 @@ public class RemoteStoreReplicationSource implements SegmentReplicationSource { private final IndexShard indexShard; private final RemoteSegmentStoreDirectory remoteDirectory; + private final CancellableThreads cancellableThreads = new CancellableThreads(); public RemoteStoreReplicationSource(IndexShard indexShard) { this.indexShard = indexShard; @@ -61,7 +66,7 @@ public void getCheckpointMetadata( // TODO: Need to figure out a way to pass this information for segment metadata via remote store. 
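A note on the fileProgressTracker parameter threaded through getSegmentFiles in this change: it is a plain BiConsumer<String, Long>, invoked with the destination file name and the number of bytes just copied for that file. A minimal sketch of a conforming tracker, assuming a hypothetical bytesPerFile map purely for illustration:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.BiConsumer;

    Map<String, Long> bytesPerFile = new ConcurrentHashMap<>();
    BiConsumer<String, Long> fileProgressTracker =
        (fileName, bytesRecovered) -> bytesPerFile.merge(fileName, bytesRecovered, Long::sum);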
try (final GatedCloseable segmentInfosSnapshot = indexShard.getSegmentInfosSnapshot()) { final Version version = segmentInfosSnapshot.get().getCommitLuceneVersion(); - RemoteSegmentMetadata mdFile = remoteDirectory.init(); + final RemoteSegmentMetadata mdFile = getRemoteSegmentMetadata(); // During initial recovery flow, the remote store might not // have metadata as primary hasn't uploaded anything yet. if (mdFile == null && indexShard.state().equals(IndexShardState.STARTED) == false) { @@ -96,6 +101,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { try { @@ -103,36 +109,52 @@ public void getSegmentFiles( listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); return; } - logger.trace("Downloading segments files from remote store {}", filesToFetch); + logger.debug("Downloading segment files from remote store {}", filesToFetch); - RemoteSegmentMetadata remoteSegmentMetadata = remoteDirectory.readLatestMetadataFile(); - List downloadedSegments = new ArrayList<>(); - Collection directoryFiles = List.of(indexShard.store().directory().listAll()); - if (remoteSegmentMetadata != null) { - try { - indexShard.store().incRef(); - indexShard.remoteStore().incRef(); - final Directory storeDirectory = indexShard.store().directory(); - for (StoreFileMetadata fileMetadata : filesToFetch) { - String file = fileMetadata.name(); - assert directoryFiles.contains(file) == false : "Local store already contains the file " + file; - storeDirectory.copyFrom(remoteDirectory, file, file, IOContext.DEFAULT); - downloadedSegments.add(fileMetadata); - } - logger.trace("Downloaded segments from remote store {}", downloadedSegments); - } finally { - indexShard.store().decRef(); - indexShard.remoteStore().decRef(); + if (remoteMetadataExists()) { + final Directory storeDirectory = indexShard.store().directory(); + final Collection directoryFiles = List.of(storeDirectory.listAll()); + final List toDownloadSegmentNames = new ArrayList<>(); + for (StoreFileMetadata fileMetadata : filesToFetch) { + String file = fileMetadata.name(); + assert directoryFiles.contains(file) == false : "Local store already contains the file " + file; + toDownloadSegmentNames.add(file); } + indexShard.getFileDownloader() + .downloadAsync( + cancellableThreads, + remoteDirectory, + new ReplicationStatsDirectoryWrapper(storeDirectory, fileProgressTracker), + toDownloadSegmentNames, + ActionListener.map(listener, r -> new GetSegmentFilesResponse(filesToFetch)) + ); + } else { + listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); } - listener.onResponse(new GetSegmentFilesResponse(downloadedSegments)); - } catch (Exception e) { + } catch (IOException | RuntimeException e) { listener.onFailure(e); } } + @Override + public void cancel() { + this.cancellableThreads.cancel("Canceled by target"); + } + @Override public String getDescription() { return "RemoteStoreReplicationSource"; } + + private boolean remoteMetadataExists() throws IOException { + final AtomicBoolean metadataExists = new AtomicBoolean(false); + cancellableThreads.executeIO(() -> metadataExists.set(remoteDirectory.readLatestMetadataFile() != null)); + return metadataExists.get(); + } + + private RemoteSegmentMetadata getRemoteSegmentMetadata() throws IOException { + AtomicReference mdFile = new AtomicReference<>(); + cancellableThreads.executeIO(() -> mdFile.set(remoteDirectory.init())); + return mdFile.get(); + } } diff --git 
a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java index 6676b5b667e42..24f0cb15ddb25 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java @@ -8,13 +8,19 @@ package org.opensearch.indices.replication; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; import org.opensearch.common.util.CancellableThreads.ExecutionCancelledException; import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import java.io.IOException; import java.util.List; +import java.util.function.BiConsumer; /** * Represents the source of a replication event. @@ -39,6 +45,7 @@ public interface SegmentReplicationSource { * @param checkpoint {@link ReplicationCheckpoint} Checkpoint to fetch metadata for. * @param filesToFetch {@link List} List of files to fetch. * @param indexShard {@link IndexShard} Reference to the IndexShard. + * @param fileProgressTracker {@link BiConsumer} A consumer that updates the replication progress for shard files. * @param listener {@link ActionListener} Listener that completes with the list of files copied. */ void getSegmentFiles( @@ -46,6 +53,7 @@ void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ); @@ -58,4 +66,69 @@ void getSegmentFiles( * Cancel any ongoing requests, should resolve any ongoing listeners with onFailure with a {@link ExecutionCancelledException}. */ default void cancel() {} + + /** + * Directory wrapper that records copy progress for replication statistics + * + * @opensearch.internal + */ + final class ReplicationStatsDirectoryWrapper extends FilterDirectory { + private final BiConsumer fileProgressTracker; + + ReplicationStatsDirectoryWrapper(Directory in, BiConsumer fileProgressTracker) { + super(in); + this.fileProgressTracker = fileProgressTracker; + } + + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { + // here we wrap the index input from the source directory to report progress of the file copy for the recovery stats. + // we increment the num bytes recovered in the readBytes method below, so if users pull statistics they can see immediately + // how much has been recovered.
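+            // Usage sketch (illustrative): wrapping a store directory as
+            //   new ReplicationStatsDirectoryWrapper(storeDirectory, (file, bytes) -> index.addRecoveredBytesToFile(file, bytes))
+            // makes every buffered readBytes call during copyFrom report recovered bytes as they stream in;
+            // here "index" stands in for a hypothetical ReplicationLuceneIndex used for stats accounting.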
+ in.copyFrom(new FilterDirectory(from) { + @Override + public IndexInput openInput(String name, IOContext context) throws IOException { + final IndexInput input = in.openInput(name, context); + return new IndexInput("StatsDirectoryWrapper(" + input.toString() + ")") { + @Override + public void close() throws IOException { + input.close(); + } + + @Override + public long getFilePointer() { + throw new UnsupportedOperationException("only straight copies are supported"); + } + + @Override + public void seek(long pos) throws IOException { + throw new UnsupportedOperationException("seeks are not supported"); + } + + @Override + public long length() { + return input.length(); + } + + @Override + public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { + throw new UnsupportedOperationException("slices are not supported"); + } + + @Override + public byte readByte() throws IOException { + throw new UnsupportedOperationException("use a buffer if you wanna perform well"); + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + // we rely on the fact that copyFrom uses a buffer + input.readBytes(b, offset, len); + fileProgressTracker.accept(dest, (long) len); + } + }; + } + }, src, dest, context); + } + } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java index e2c47b0fb3159..674c09311c645 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceHandler.java @@ -12,8 +12,6 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.StepListener; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.routing.IndexShardRoutingTable; -import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.ListenableFuture; @@ -22,7 +20,6 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.StoreFileMetadata; -import org.opensearch.indices.recovery.DelayRecoveryException; import org.opensearch.indices.recovery.FileChunkWriter; import org.opensearch.indices.recovery.MultiChunkTransfer; import org.opensearch.indices.replication.common.CopyState; @@ -146,12 +143,6 @@ public synchronized void sendFiles(GetSegmentFilesRequest request, ActionListene ); }; cancellableThreads.checkForCancel(); - final IndexShardRoutingTable routingTable = shard.getReplicationGroup().getRoutingTable(); - ShardRouting targetShardRouting = routingTable.getByAllocationId(request.getTargetAllocationId()); - if (targetShardRouting == null) { - logger.debug("delaying replication of {} as it is not listed as assigned to target node {}", shard.shardId(), targetNode); - throw new DelayRecoveryException("source node does not have the shard listed in its state as allocated on the node"); - } final StepListener sendFileStep = new StepListener<>(); Set storeFiles = new HashSet<>(Arrays.asList(shard.store().directory().listAll())); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 
7a5f9608dace0..cc71ef816e525 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -9,13 +9,15 @@ package org.opensearch.indices.replication; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; import org.opensearch.OpenSearchCorruptionException; -import org.opensearch.OpenSearchException; import org.opensearch.action.StepListener; import org.opensearch.common.UUIDs; import org.opensearch.common.lucene.Lucene; @@ -33,8 +35,11 @@ import org.opensearch.indices.replication.common.ReplicationTarget; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.List; import java.util.Locale; +import java.util.Set; +import java.util.stream.Collectors; /** * Represents the target of a replication event. @@ -165,7 +170,14 @@ public void startReplication(ActionListener listener) { final List filesToFetch = getFiles(checkpointInfo); state.setStage(SegmentReplicationState.Stage.GET_FILES); cancellableThreads.checkForCancel(); - source.getSegmentFiles(getId(), checkpointInfo.getCheckpoint(), filesToFetch, indexShard, getFilesListener); + source.getSegmentFiles( + getId(), + checkpointInfo.getCheckpoint(), + filesToFetch, + indexShard, + this::updateFileRecoveryBytes, + getFilesListener + ); }, listener::onFailure); getFilesListener.whenComplete(response -> { @@ -178,7 +190,27 @@ private List getFiles(CheckpointInfoResponse checkpointInfo) cancellableThreads.checkForCancel(); state.setStage(SegmentReplicationState.Stage.FILE_DIFF); final Store.RecoveryDiff diff = Store.segmentReplicationDiff(checkpointInfo.getMetadataMap(), indexShard.getSegmentMetadataMap()); - logger.trace(() -> new ParameterizedMessage("Replication diff for checkpoint {} {}", checkpointInfo.getCheckpoint(), diff)); + // local files + final Set localFiles = Set.of(indexShard.store().directory().listAll()); + // set of local files that can be reused + final Set reuseFiles = diff.missing.stream() + .filter(storeFileMetadata -> localFiles.contains(storeFileMetadata.name())) + .filter(this::validateLocalChecksum) + .map(StoreFileMetadata::name) + .collect(Collectors.toSet()); + + final List missingFiles = diff.missing.stream() + .filter(md -> reuseFiles.contains(md.name()) == false) + .collect(Collectors.toList()); + + logger.trace( + () -> new ParameterizedMessage( + "Replication diff for checkpoint {} {} {}", + checkpointInfo.getCheckpoint(), + missingFiles, + diff.different + ) + ); /* * Segments are immutable. 
So if the replica has any segments with the same name that differ from the one in the incoming * snapshot from source that means the local copy of the segment has been corrupted/changed in some way and we throw an @@ -194,10 +226,48 @@ private List getFiles(CheckpointInfoResponse checkpointInfo) ); } - for (StoreFileMetadata file : diff.missing) { + for (StoreFileMetadata file : missingFiles) { state.getIndex().addFileDetail(file.name(), file.length(), false); } - return diff.missing; + return missingFiles; + } + + // pkg private for tests + private boolean validateLocalChecksum(StoreFileMetadata file) { + try (IndexInput indexInput = indexShard.store().directory().openInput(file.name(), IOContext.DEFAULT)) { + String checksum = Store.digestToString(CodecUtil.retrieveChecksum(indexInput)); + if (file.checksum().equals(checksum)) { + return true; + } else { + // clear local copy with mismatch. Safe because file is not referenced by active reader. + store.deleteQuiet(file.name()); + return false; + } + } catch (IOException e) { + logger.warn("Error reading " + file, e); + // Delete file on exceptions so that it can be re-downloaded. This is safe to do as this file is local only + // and not referenced by reader. + try { + indexShard.store().directory().deleteFile(file.name()); + } catch (IOException ex) { + throw new UncheckedIOException("Error reading " + file, e); + } + return false; + } + } + + /** + * Updates the state to reflect recovery progress for the given file and + * updates the last access time for the target. + * @param fileName Name of the file being downloaded + * @param bytesRecovered Number of bytes recovered + */ + private void updateFileRecoveryBytes(String fileName, long bytesRecovered) { + ReplicationLuceneIndex index = state.getIndex(); + if (index != null) { + index.addRecoveredBytesToFile(fileName, bytesRecovered); + } + setLastAccessTime(); } private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse) throws OpenSearchCorruptionException { @@ -220,9 +290,7 @@ private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse) } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) { // this is a fatal exception at this stage. // this means we transferred files from the remote that have not be checksummed and they are - // broken. We have to clean up this shard entirely, remove all files and bubble it up to the - // source shard since this index might be broken there as well? The Source can handle this and checks - // its content on disk if possible. + // broken. We have to clean up this shard entirely, remove all files and bubble it up. try { try { store.removeCorruptionMarker(); @@ -238,14 +306,14 @@ private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse) // In this case the shard is closed at some point while updating the reader. // This can happen when the engine is closed in a separate thread. logger.warn("Shard is already closed, closing replication"); - } catch (OpenSearchException ex) { + } catch (CancellableThreads.ExecutionCancelledException ex) { /* Ignore closed replication target as it can happen due to index shard closed event in a separate thread. 
In such scenario, ignore the exception */ - assert cancellableThreads.isCancelled() : "Replication target closed but segment replication not cancelled"; + assert cancellableThreads.isCancelled() : "Replication target cancelled but cancellable threads not cancelled"; } catch (Exception ex) { - throw new OpenSearchCorruptionException(ex); + throw new ReplicationFailedException(ex); } finally { if (store != null) { store.decRef(); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index c071b22ba4cba..73da0482537ad 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.index.CorruptIndexException; import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchCorruptionException; import org.opensearch.action.support.ChannelActionListener; @@ -28,6 +29,7 @@ import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; +import org.opensearch.index.store.Store; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.FileChunkRequest; import org.opensearch.indices.recovery.ForceSyncRequest; @@ -46,10 +48,12 @@ import org.opensearch.transport.TransportRequestOptions; import org.opensearch.transport.TransportService; +import java.io.IOException; import java.util.Map; import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; +import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.opensearch.indices.replication.SegmentReplicationSourceService.Actions.UPDATE_VISIBLE_CHECKPOINT; /** @@ -208,7 +212,7 @@ public SegmentReplicationState getSegmentReplicationState(ShardId shardId) { * @param replicaShard replica shard on which checkpoint is received */ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedCheckpoint, final IndexShard replicaShard) { - logger.trace(() -> new ParameterizedMessage("Replica received new replication checkpoint from primary [{}]", receivedCheckpoint)); + logger.debug(() -> new ParameterizedMessage("Replica received new replication checkpoint from primary [{}]", receivedCheckpoint)); // if the shard is in any state if (replicaShard.state().equals(IndexShardState.CLOSED)) { // ignore if shard is closed @@ -224,7 +228,7 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe SegmentReplicationTarget ongoingReplicationTarget = onGoingReplications.getOngoingReplicationTarget(replicaShard.shardId()); if (ongoingReplicationTarget != null) { if (ongoingReplicationTarget.getCheckpoint().getPrimaryTerm() < receivedCheckpoint.getPrimaryTerm()) { - logger.trace( + logger.debug( () -> new ParameterizedMessage( "Cancelling ongoing replication {} from old primary with primary term {}", ongoingReplicationTarget.description(), @@ -233,7 +237,7 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe ); ongoingReplicationTarget.cancel("Cancelling stuck target after new primary"); } else { - logger.trace( + logger.debug( () -> new ParameterizedMessage( "Ignoring 
new replication checkpoint - shard is currently replicating to checkpoint {}", ongoingReplicationTarget.getCheckpoint() @@ -247,7 +251,7 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe startReplication(replicaShard, receivedCheckpoint, new SegmentReplicationListener() { @Override public void onReplicationDone(SegmentReplicationState state) { - logger.trace( + logger.debug( () -> new ParameterizedMessage( "[shardId {}] [replication id {}] Replication complete to {}, timing data: {}", replicaShard.shardId().getId(), @@ -279,6 +283,12 @@ public void onReplicationFailure( } } }); + } else if (replicaShard.isSegmentReplicationAllowed()) { + // if we didn't process the checkpoint because we are up to date, + // send our latest checkpoint to the primary to update tracking. + // replicationId is not used by the primary, so it is set to a default value. + final long replicationId = NO_OPS_PERFORMED; + updateVisibleCheckpoint(replicationId, replicaShard); } } else { logger.trace( @@ -512,7 +522,7 @@ private void start(final long replicationId) { target.startReplication(new ActionListener<>() { @Override public void onResponse(Void o) { - logger.trace(() -> new ParameterizedMessage("Finished replicating {} marking as done.", target.description())); + logger.debug(() -> new ParameterizedMessage("Finished replicating {} marking as done.", target.description())); onGoingReplications.markAsDone(replicationId); if (target.state().getIndex().recoveredFileCount() != 0 && target.state().getIndex().recoveredBytes() != 0) { completedReplications.put(target.shardId(), target); @@ -521,7 +531,8 @@ public void onResponse(Void o) { @Override public void onFailure(Exception e) { - if (e instanceof OpenSearchCorruptionException) { + logger.debug("Replication failed {}", target.description()); + if (isStoreCorrupt(target) || e instanceof CorruptIndexException || e instanceof OpenSearchCorruptionException) { onGoingReplications.fail(replicationId, new ReplicationFailedException("Store corruption during replication", e), true); return; } @@ -530,6 +541,27 @@ public void onFailure(Exception e) { }); } + private boolean isStoreCorrupt(SegmentReplicationTarget target) { + // ensure target is not already closed. In that case + // we can assume the store is not corrupt and that the replication + // event completed successfully. + if (target.refCount() > 0) { + final Store store = target.store(); + if (store.tryIncRef()) { + try { + return store.isMarkedCorrupted(); + } catch (IOException ex) { + logger.warn("Unable to determine if store is corrupt", ex); + return false; + } finally { + store.decRef(); + } + } + } + // store already closed.
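+        // Illustrative note: tryIncRef fails when the store is concurrently closing; treating that
+        // case as not corrupt avoids failing the shard over a benign race with shard close.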
+ return false; + } + private class FileChunkTransportRequestHandler implements TransportRequestHandler { // How many bytes we've copied since we last called RateLimiter.pause diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index 76f0d333db977..11b01965c4af5 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -137,7 +137,7 @@ public String executor() { @Override public void handleResponse(ReplicationResponse response) { timer.stop(); - logger.trace( + logger.debug( () -> new ParameterizedMessage( "[shardId {}] Completed publishing checkpoint [{}], timing: {}", indexShard.shardId().getId(), @@ -152,7 +152,7 @@ public void handleResponse(ReplicationResponse response) { @Override public void handleException(TransportException e) { timer.stop(); - logger.trace("[shardId {}] Failed to publish checkpoint, timing: {}", indexShard.shardId().getId(), timer.time()); + logger.debug("[shardId {}] Failed to publish checkpoint, timing: {}", indexShard.shardId().getId(), timer.time()); task.setPhase("finished"); taskManager.unregister(task); if (ExceptionsHelper.unwrap( diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java index 70c3e71ba18b9..521522803c726 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java @@ -15,8 +15,11 @@ import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.store.StoreFileMetadata; import java.io.IOException; +import java.util.Collections; +import java.util.Map; import java.util.Objects; /** @@ -32,6 +35,7 @@ public class ReplicationCheckpoint implements Writeable, Comparable metadataMap; public static ReplicationCheckpoint empty(ShardId shardId) { return empty(shardId, ""); @@ -48,19 +52,29 @@ private ReplicationCheckpoint(ShardId shardId, String codec) { segmentInfosVersion = SequenceNumbers.NO_OPS_PERFORMED; length = 0L; this.codec = codec; + this.metadataMap = Collections.emptyMap(); } public ReplicationCheckpoint(ShardId shardId, long primaryTerm, long segmentsGen, long segmentInfosVersion, String codec) { - this(shardId, primaryTerm, segmentsGen, segmentInfosVersion, 0L, codec); - } - - public ReplicationCheckpoint(ShardId shardId, long primaryTerm, long segmentsGen, long segmentInfosVersion, long length, String codec) { + this(shardId, primaryTerm, segmentsGen, segmentInfosVersion, 0L, codec, Collections.emptyMap()); + } + + public ReplicationCheckpoint( + ShardId shardId, + long primaryTerm, + long segmentsGen, + long segmentInfosVersion, + long length, + String codec, + Map metadataMap + ) { this.shardId = shardId; this.primaryTerm = primaryTerm; this.segmentsGen = segmentsGen; this.segmentInfosVersion = segmentInfosVersion; this.length = length; this.codec = codec; + this.metadataMap = metadataMap; } public ReplicationCheckpoint(StreamInput in) throws IOException { @@ -75,6 +89,11 @@ public ReplicationCheckpoint(StreamInput in) 
throws IOException { length = 0L; codec = null; } + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { + this.metadataMap = in.readMap(StreamInput::readString, StoreFileMetadata::new); + } else { + this.metadataMap = Collections.emptyMap(); + } } /** @@ -135,6 +154,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(length); out.writeString(codec); } + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { + out.writeMap(metadataMap, StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut)); + } } @Override @@ -169,6 +191,10 @@ public boolean isAheadOf(@Nullable ReplicationCheckpoint other) { || (primaryTerm == other.getPrimaryTerm() && segmentInfosVersion > other.getSegmentInfosVersion()); } + public Map getMetadataMap() { + return metadataMap; + } + @Override public String toString() { return "ReplicationCheckpoint{" diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java index f5cb32b741862..b4bcdc92e539a 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java @@ -34,6 +34,7 @@ public SegmentReplicationCheckpointPublisher(PublishAction publishAction) { public void publish(IndexShard indexShard, ReplicationCheckpoint checkpoint) { publishAction.publish(indexShard, checkpoint); + indexShard.onCheckpointPublished(checkpoint); } /** diff --git a/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java b/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java index a6aa39e7cb074..3b7ae2af80ca0 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java @@ -8,7 +8,6 @@ package org.opensearch.indices.replication.common; -import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.ByteBuffersDataOutput; import org.apache.lucene.store.ByteBuffersIndexOutput; @@ -38,7 +37,6 @@ public class CopyState extends AbstractRefCounted { private final ReplicationCheckpoint replicationCheckpoint; private final Map metadataMap; private final byte[] infosBytes; - private GatedCloseable commitRef; private final IndexShard shard; public CopyState(ReplicationCheckpoint requestedReplicationCheckpoint, IndexShard shard) throws IOException { @@ -51,7 +49,6 @@ public CopyState(ReplicationCheckpoint requestedReplicationCheckpoint, IndexShar this.replicationCheckpoint = latestSegmentInfosAndCheckpoint.v2(); SegmentInfos segmentInfos = this.segmentInfosRef.get(); this.metadataMap = shard.store().getSegmentMetadataMap(segmentInfos); - this.commitRef = shard.acquireLastIndexCommit(false); ByteBuffersDataOutput buffer = new ByteBuffersDataOutput(); // resource description and name are not used, but resource description cannot be null @@ -65,10 +62,6 @@ public CopyState(ReplicationCheckpoint requestedReplicationCheckpoint, IndexShar protected void closeInternal() { try { segmentInfosRef.close(); - // commitRef may be null if there were no pending delete files - if (commitRef != null) { - commitRef.close(); - } } catch (IOException e) { throw new UncheckedIOException(e); } diff --git 
a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java index 39649727e31d7..3775be7b6da15 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java @@ -58,8 +58,7 @@ public class ReplicationRequestTracker { * This method will mark that a request with a unique sequence number has been received. If this is the * first time the unique request has been received, this method will return a listener to be completed. * The caller should then perform the requested action and complete the returned listener. - * - * + *
<p>
              * If the unique request has already been received, this method will either complete the provided listener * or attach that listener to the listener returned in the first call. In this case, the method will * return null and the caller should not perform the requested action as a prior caller is already diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java index 4606d878052f7..aaac8998aad37 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java @@ -91,6 +91,9 @@ public ReplicationTarget(String name, IndexShard indexShard, ReplicationLuceneIn // make sure the store is not released until we are done. this.cancellableThreads = new CancellableThreads(); store.incRef(); + if (indexShard.indexSettings().isRemoteStoreEnabled()) { + indexShard.remoteStore().incRef(); + } } public long getId() { @@ -278,6 +281,12 @@ public abstract void writeFileChunk( ); protected void closeInternal() { - store.decRef(); + try { + store.decRef(); + } finally { + if (indexShard.indexSettings().isRemoteStoreEnabled()) { + indexShard.remoteStore().decRef(); + } + } } } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/SegmentReplicationLagTimer.java b/server/src/main/java/org/opensearch/indices/replication/common/SegmentReplicationLagTimer.java new file mode 100644 index 0000000000000..c97edba72da0d --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/common/SegmentReplicationLagTimer.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.common; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Wrapper class for Replication Timer which also tracks time elapsed since the timer was created. + * Currently, this is being used to calculate + * 1. Replication Lag: Total time taken by replica to sync after primary refreshed. + * 2. Replication event time: Total time taken by replica to sync after primary published the checkpoint + * (excludes the time spent by primary for uploading the segments to remote store). 
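+ * For example (illustrative): totalElapsedTime() reports wall-clock milliseconds since the timer
+ * object was created (clamped at zero), independent of when start() was called, which is what
+ * lets it measure lag from checkpoint publication rather than from sync start.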
+ * + * @opensearch.internal + */ +public class SegmentReplicationLagTimer extends ReplicationTimer { + private long creationTime; + + public SegmentReplicationLagTimer() { + super(); + creationTime = System.nanoTime(); + } + + public SegmentReplicationLagTimer(StreamInput in) throws IOException { + super(in); + creationTime = in.readVLong(); + } + + @Override + public synchronized void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVLong(creationTime); + } + + public long totalElapsedTime() { + return TimeValue.nsecToMSec(Math.max(System.nanoTime() - creationTime, 0)); + } +} diff --git a/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java b/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java index 01dafc52d4551..5185b740d90cb 100644 --- a/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java +++ b/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java @@ -35,6 +35,7 @@ import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.Nullable; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.common.bytes.BytesReference; @@ -73,7 +74,7 @@ private ConfigurationUtils() {} /** * Returns and removes the specified optional property from the specified configuration map. - * + *
<p>
              * If the property value isn't of type string a {@link OpenSearchParseException} is thrown. */ public static String readOptionalStringProperty( @@ -88,7 +89,7 @@ public static String readOptionalStringProperty( /** * Returns and removes the specified property from the specified configuration map. - * + *
<p>
              * If the property value isn't of type string an {@link OpenSearchParseException} is thrown. * If the property is missing an {@link OpenSearchParseException} is thrown */ @@ -103,7 +104,7 @@ public static String readStringProperty( /** * Returns and removes the specified property from the specified configuration map. - * + *
<p>
              * If the property value isn't of type string a {@link OpenSearchParseException} is thrown. * If the property is missing and no default value has been specified a {@link OpenSearchParseException} is thrown */ @@ -140,7 +141,7 @@ private static String readString(String processorType, String processorTag, Stri /** * Returns and removes the specified property from the specified configuration map. - * + *
<p>
              * If the property value isn't of type string or int a {@link OpenSearchParseException} is thrown. * If the property is missing and no default value has been specified a {@link OpenSearchParseException} is thrown */ @@ -179,7 +180,7 @@ private static String readStringOrInt(String processorType, String processorTag, /** * Returns and removes the specified property from the specified configuration map. - * + *
<p>
              * If the property value isn't of type string or int a {@link OpenSearchParseException} is thrown. */ public static String readOptionalStringOrIntProperty( @@ -227,7 +228,7 @@ private static Boolean readBoolean(String processorType, String processorTag, St /** * Returns and removes the specified property from the specified configuration map. - * + *
<p>
              * If the property value isn't of type int a {@link OpenSearchParseException} is thrown. * If the property is missing an {@link OpenSearchParseException} is thrown */ @@ -256,7 +257,7 @@ public static Integer readIntProperty( /** * Returns and removes the specified property from the specified configuration map. - * + *
<p>
              * If the property value isn't of type int a {@link OpenSearchParseException} is thrown. * If the property is missing an {@link OpenSearchParseException} is thrown */ @@ -284,7 +285,7 @@ public static Double readDoubleProperty( /** * Returns and removes the specified property of type list from the specified configuration map. - * + *
<p>
              * If the property value isn't of type list an {@link OpenSearchParseException} is thrown. */ public static List readOptionalList( @@ -302,7 +303,7 @@ public static List readOptionalList( /** * Returns and removes the specified property of type list from the specified configuration map. - * + *
<p>
              * If the property value isn't of type list an {@link OpenSearchParseException} is thrown. * If the property is missing an {@link OpenSearchParseException} is thrown */ @@ -332,7 +333,7 @@ private static List readList(String processorType, String processorTag, S /** * Returns and removes the specified property of type map from the specified configuration map. - * + *
<p>
              * If the property value isn't of type map an {@link OpenSearchParseException} is thrown. * If the property is missing an {@link OpenSearchParseException} is thrown */ @@ -352,7 +353,7 @@ public static Map readMap( /** * Returns and removes the specified property of type map from the specified configuration map. - * + *
<p>
              * If the property value isn't of type map an {@link OpenSearchParseException} is thrown. */ public static Map readOptionalMap( @@ -510,9 +511,11 @@ public static Processor readProcessor( Map processorFactories, ScriptService scriptService, String type, - Object config + @Nullable Object config ) throws Exception { - if (config instanceof Map) { + if (config == null) { + throw newConfigurationException(type, null, null, "the config of processor [" + type + "] cannot be null"); + } else if (config instanceof Map) { return readProcessor(processorFactories, scriptService, type, (Map) config); } else if (config instanceof String && "script".equals(type)) { Map normalizedScript = new HashMap<>(1); @@ -527,8 +530,11 @@ public static Processor readProcessor( Map processorFactories, ScriptService scriptService, String type, - Map config + @Nullable Map config ) throws Exception { + if (config == null) { + throw newConfigurationException(type, null, null, "expect the config of processor [" + type + "] to be map, but is null"); + } String tag = ConfigurationUtils.readOptionalStringProperty(null, null, config, TAG_KEY); String description = ConfigurationUtils.readOptionalStringProperty(null, tag, config, DESCRIPTION_KEY); boolean ignoreFailure = ConfigurationUtils.readBooleanProperty(null, null, config, IGNORE_FAILURE_KEY, false); diff --git a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java index 91003e963e302..baf357a4bc0d5 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestService.java +++ b/server/src/main/java/org/opensearch/ingest/IngestService.java @@ -695,7 +695,7 @@ public IngestStats stats() { /** * Adds a listener that gets invoked with the current cluster state before processor factories * get invoked. - * + *
<p>
              * This is useful for components that are used by ingest processors, so that they have the opportunity to update * before these components get used by the ingest processor factory. */ diff --git a/server/src/main/java/org/opensearch/ingest/Pipeline.java b/server/src/main/java/org/opensearch/ingest/Pipeline.java index 37643e48ed2be..2541cfbf4af77 100644 --- a/server/src/main/java/org/opensearch/ingest/Pipeline.java +++ b/server/src/main/java/org/opensearch/ingest/Pipeline.java @@ -123,8 +123,8 @@ public static Pipeline create( /** * Modifies the data of a document to be indexed based on the processor this pipeline holds - * - * If null is returned then this document will be dropped and not indexed, otherwise + *
<p>
              + * If {@code null} is returned then this document will be dropped and not indexed, otherwise * this document will be kept and indexed. */ public void execute(IngestDocument ingestDocument, BiConsumer handler) { diff --git a/server/src/main/java/org/opensearch/ingest/Processor.java b/server/src/main/java/org/opensearch/ingest/Processor.java index e0f196dfcb115..6097045a87e21 100644 --- a/server/src/main/java/org/opensearch/ingest/Processor.java +++ b/server/src/main/java/org/opensearch/ingest/Processor.java @@ -48,7 +48,7 @@ /** * A processor implementation may modify the data belonging to a document. * Whether changes are made and what exactly is modified is up to the implementation. - * + *
<p>
              * Processors may get called concurrently and thus need to be thread-safe. * * @opensearch.internal @@ -57,7 +57,7 @@ public interface Processor { /** * Introspect and potentially modify the incoming data. - * + *
<p>
              * Expert method: only override this method if a processor implementation needs to make an asynchronous call, * otherwise just overwrite {@link #execute(IngestDocument)}. */ diff --git a/server/src/main/java/org/opensearch/ingest/ValueSource.java b/server/src/main/java/org/opensearch/ingest/ValueSource.java index 0ef7c3373596d..3463fb0f83b26 100644 --- a/server/src/main/java/org/opensearch/ingest/ValueSource.java +++ b/server/src/main/java/org/opensearch/ingest/ValueSource.java @@ -56,7 +56,7 @@ public interface ValueSource { /** * Returns a copy of the value this ValueSource holds and resolves templates if there're any. - * + *
<p>
              * For immutable values only a copy of the reference to the value is made. * * @param model The model to be used when resolving any templates diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java b/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java index cbde61925d834..73ae29920d2a0 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsInfo.java @@ -237,6 +237,14 @@ public static class DeviceStats implements Writeable, ToXContentFragment { final long previousWritesCompleted; final long currentSectorsWritten; final long previousSectorsWritten; + final long currentReadTime; + final long previousReadTime; + final long currentWriteTime; + final long previousWriteTime; + final long currentQueueSize; + final long previousQueueSize; + final long currentIOTime; + final long previousIOTime; public DeviceStats( final int majorDeviceNumber, @@ -246,6 +254,10 @@ public DeviceStats( final long currentSectorsRead, final long currentWritesCompleted, final long currentSectorsWritten, + final long currentReadTime, + final long currentWriteTime, + final long currentQueueSize, + final long currentIOTime, final DeviceStats previousDeviceStats ) { this( @@ -259,7 +271,15 @@ public DeviceStats( currentSectorsRead, previousDeviceStats != null ? previousDeviceStats.currentSectorsRead : -1, currentWritesCompleted, - previousDeviceStats != null ? previousDeviceStats.currentWritesCompleted : -1 + previousDeviceStats != null ? previousDeviceStats.currentWritesCompleted : -1, + currentReadTime, + previousDeviceStats != null ? previousDeviceStats.currentReadTime : -1, + currentWriteTime, + previousDeviceStats != null ? previousDeviceStats.currentWriteTime : -1, + currentQueueSize, + previousDeviceStats != null ? previousDeviceStats.currentQueueSize : -1, + currentIOTime, + previousDeviceStats != null ?
previousDeviceStats.currentIOTime : -1 ); } @@ -274,7 +294,15 @@ private DeviceStats( final long currentSectorsRead, final long previousSectorsRead, final long currentWritesCompleted, - final long previousWritesCompleted + final long previousWritesCompleted, + final long currentReadTime, + final long previousReadTime, + final long currentWriteTime, + final long previousWriteTime, + final long currentQueueSize, + final long previousQueueSize, + final long currentIOTime, + final long previousIOTime ) { this.majorDeviceNumber = majorDeviceNumber; this.minorDeviceNumber = minorDeviceNumber; @@ -287,6 +315,14 @@ private DeviceStats( this.previousSectorsRead = previousSectorsRead; this.currentSectorsWritten = currentSectorsWritten; this.previousSectorsWritten = previousSectorsWritten; + this.currentReadTime = currentReadTime; + this.previousReadTime = previousReadTime; + this.currentWriteTime = currentWriteTime; + this.previousWriteTime = previousWriteTime; + this.currentQueueSize = currentQueueSize; + this.previousQueueSize = previousQueueSize; + this.currentIOTime = currentIOTime; + this.previousIOTime = previousIOTime; } public DeviceStats(StreamInput in) throws IOException { @@ -301,6 +337,25 @@ public DeviceStats(StreamInput in) throws IOException { previousSectorsRead = in.readLong(); currentSectorsWritten = in.readLong(); previousSectorsWritten = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + currentReadTime = in.readLong(); + previousReadTime = in.readLong(); + currentWriteTime = in.readLong(); + previousWriteTime = in.readLong(); + currentQueueSize = in.readLong(); + previousQueueSize = in.readLong(); + currentIOTime = in.readLong(); + previousIOTime = in.readLong(); + } else { + currentReadTime = 0; + previousReadTime = 0; + currentWriteTime = 0; + previousWriteTime = 0; + currentQueueSize = 0; + previousQueueSize = 0; + currentIOTime = 0; + previousIOTime = 0; + } } @Override @@ -316,6 +371,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(previousSectorsRead); out.writeLong(currentSectorsWritten); out.writeLong(previousSectorsWritten); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeLong(currentReadTime); + out.writeLong(previousReadTime); + out.writeLong(currentWriteTime); + out.writeLong(previousWriteTime); + out.writeLong(currentQueueSize); + out.writeLong(previousQueueSize); + out.writeLong(currentIOTime); + out.writeLong(previousIOTime); + } } public long operations() { @@ -348,6 +413,39 @@ public long writeKilobytes() { return (currentSectorsWritten - previousSectorsWritten) / 2; } + /** + * Total time taken for all read operations + */ + public long readTime() { + if (previousReadTime == -1) return -1; + return currentReadTime - previousReadTime; + } + + /** + * Total time taken for all write operations + */ + public long writeTime() { + if (previousWriteTime == -1) return -1; + return currentWriteTime - previousWriteTime; + } + + /** + * Queue size based on weighted time spent doing I/Os + */ + public long queueSize() { + if (previousQueueSize == -1) return -1; + return currentQueueSize - previousQueueSize; + } + + /** + * Total time spent doing I/Os + */ + public long ioTimeInMillis() { + if (previousIOTime == -1) return -1; + + return (currentIOTime - previousIOTime); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field("device_name", deviceName); @@ -356,9 +454,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params 
params) throws builder.field(IoStats.WRITE_OPERATIONS, writeOperations()); builder.field(IoStats.READ_KILOBYTES, readKilobytes()); builder.field(IoStats.WRITE_KILOBYTES, writeKilobytes()); + builder.field(IoStats.READ_TIME, readTime()); + builder.field(IoStats.WRITE_TIME, writeTime()); + builder.field(IoStats.QUEUE_SIZE, queueSize()); + builder.field(IoStats.IO_TIME_MS, ioTimeInMillis()); return builder; } - } /** @@ -373,6 +474,10 @@ public static class IoStats implements Writeable, ToXContentFragment { private static final String WRITE_OPERATIONS = "write_operations"; private static final String READ_KILOBYTES = "read_kilobytes"; private static final String WRITE_KILOBYTES = "write_kilobytes"; + private static final String READ_TIME = "read_time"; + private static final String WRITE_TIME = "write_time"; + private static final String QUEUE_SIZE = "queue_size"; + private static final String IO_TIME_MS = "io_time_in_millis"; final DeviceStats[] devicesStats; final long totalOperations; @@ -380,6 +485,10 @@ public static class IoStats implements Writeable, ToXContentFragment { final long totalWriteOperations; final long totalReadKilobytes; final long totalWriteKilobytes; + final long totalReadTime; + final long totalWriteTime; + final long totalQueueSize; + final long totalIOTimeInMillis; public IoStats(final DeviceStats[] devicesStats) { this.devicesStats = devicesStats; @@ -388,18 +497,30 @@ public IoStats(final DeviceStats[] devicesStats) { long totalWriteOperations = 0; long totalReadKilobytes = 0; long totalWriteKilobytes = 0; + long totalReadTime = 0; + long totalWriteTime = 0; + long totalQueueSize = 0; + long totalIOTimeInMillis = 0; for (DeviceStats deviceStats : devicesStats) { totalOperations += deviceStats.operations() != -1 ? deviceStats.operations() : 0; totalReadOperations += deviceStats.readOperations() != -1 ? deviceStats.readOperations() : 0; totalWriteOperations += deviceStats.writeOperations() != -1 ? deviceStats.writeOperations() : 0; totalReadKilobytes += deviceStats.readKilobytes() != -1 ? deviceStats.readKilobytes() : 0; totalWriteKilobytes += deviceStats.writeKilobytes() != -1 ? deviceStats.writeKilobytes() : 0; + totalReadTime += deviceStats.readTime() != -1 ? deviceStats.readTime() : 0; + totalWriteTime += deviceStats.writeTime() != -1 ? deviceStats.writeTime() : 0; + totalQueueSize += deviceStats.queueSize() != -1 ? deviceStats.queueSize() : 0; + totalIOTimeInMillis += deviceStats.ioTimeInMillis() != -1 ? 
deviceStats.ioTimeInMillis() : 0; } this.totalOperations = totalOperations; this.totalReadOperations = totalReadOperations; this.totalWriteOperations = totalWriteOperations; this.totalReadKilobytes = totalReadKilobytes; this.totalWriteKilobytes = totalWriteKilobytes; + this.totalReadTime = totalReadTime; + this.totalWriteTime = totalWriteTime; + this.totalQueueSize = totalQueueSize; + this.totalIOTimeInMillis = totalIOTimeInMillis; } public IoStats(StreamInput in) throws IOException { @@ -414,6 +535,17 @@ public IoStats(StreamInput in) throws IOException { this.totalWriteOperations = in.readLong(); this.totalReadKilobytes = in.readLong(); this.totalWriteKilobytes = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + this.totalReadTime = in.readLong(); + this.totalWriteTime = in.readLong(); + this.totalQueueSize = in.readLong(); + this.totalIOTimeInMillis = in.readLong(); + } else { + this.totalReadTime = 0; + this.totalWriteTime = 0; + this.totalQueueSize = 0; + this.totalIOTimeInMillis = 0; + } } @Override @@ -427,6 +559,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(totalWriteOperations); out.writeLong(totalReadKilobytes); out.writeLong(totalWriteKilobytes); + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeLong(totalReadTime); + out.writeLong(totalWriteTime); + out.writeLong(totalQueueSize); + out.writeLong(totalIOTimeInMillis); + } } public DeviceStats[] getDevicesStats() { @@ -453,6 +591,34 @@ public long getTotalWriteKilobytes() { return totalWriteKilobytes; } + /** + * Sum of read time across all devices + */ + public long getTotalReadTime() { + return totalReadTime; + } + + /** + * Sum of write time across all devices + */ + public long getTotalWriteTime() { + return totalWriteTime; + } + + /** + * Sum of queue size across all devices + */ + public long getTotalQueueSize() { + return totalQueueSize; + } + + /** + * Sum of IO time across all devices + */ + public long getTotalIOTimeMillis() { + return totalIOTimeInMillis; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (devicesStats.length > 0) { @@ -470,11 +636,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(WRITE_OPERATIONS, totalWriteOperations); builder.field(READ_KILOBYTES, totalReadKilobytes); builder.field(WRITE_KILOBYTES, totalWriteKilobytes); + + builder.field(READ_TIME, totalReadTime); + builder.field(WRITE_TIME, totalWriteTime); + builder.field(QUEUE_SIZE, totalQueueSize); + builder.field(IO_TIME_MS, totalIOTimeInMillis); builder.endObject(); } return builder; } - } private final long timestamp; diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java index f39de883536be..0c3d412ec4218 100644 --- a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java +++ b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java @@ -109,6 +109,25 @@ final FsInfo.IoStats ioStats(final Set> devicesNumbers, List devicesStats = new ArrayList<>(); + /** + * The /proc/diskstats file displays the I/O statistics of block devices. 
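+ * A sample line, with hypothetical values:
+ *   8 0 sda 10 0 80 4 20 0 160 7 0 11 11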
+ * Each line contains the following 14 fields: ( + additional fields ) + * + * 1 major number + * 2 minor number + * 3 device name + * 4 reads completed successfully + * 5 reads merged + * 6 sectors read + * 7 time spent reading (ms) + * 8 writes completed + * 9 writes merged + * 10 sectors written + * 11 time spent writing (ms) + * 12 I/Os currently in progress + * 13 time spent doing I/Os (ms) ---- IO use percent + * 14 weighted time spent doing I/Os (ms) ---- Queue size + */ List lines = readProcDiskStats(); if (!lines.isEmpty()) { for (String line : lines) { @@ -123,6 +142,12 @@ final FsInfo.IoStats ioStats(final Set> devicesNumbers, final long sectorsRead = Long.parseLong(fields[5]); final long writesCompleted = Long.parseLong(fields[7]); final long sectorsWritten = Long.parseLong(fields[9]); + // readTime and writeTime calculates the total read/write time taken for each request to complete + // ioTime calculates actual time queue and disks are busy + final long readTime = Long.parseLong(fields[6]); + final long writeTime = Long.parseLong(fields[10]); + final long ioTime = fields.length > 12 ? Long.parseLong(fields[12]) : 0; + final long queueSize = fields.length > 13 ? Long.parseLong(fields[13]) : 0; final FsInfo.DeviceStats deviceStats = new FsInfo.DeviceStats( majorDeviceNumber, minorDeviceNumber, @@ -131,6 +156,10 @@ final FsInfo.IoStats ioStats(final Set> devicesNumbers, sectorsRead, writesCompleted, sectorsWritten, + readTime, + writeTime, + queueSize, + ioTime, deviceMap.get(Tuple.tuple(majorDeviceNumber, minorDeviceNumber)) ); devicesStats.add(deviceStats); diff --git a/server/src/main/java/org/opensearch/monitor/os/OsProbe.java b/server/src/main/java/org/opensearch/monitor/os/OsProbe.java index 98229941252ba..a0a14372aa31a 100644 --- a/server/src/main/java/org/opensearch/monitor/os/OsProbe.java +++ b/server/src/main/java/org/opensearch/monitor/os/OsProbe.java @@ -59,13 +59,13 @@ /** * The {@link OsProbe} class retrieves information about the physical and swap size of the machine * memory, as well as the system load average and cpu load. - * + *
<p>
              * In some exceptional cases, it's possible the underlying native methods used by * {@link #getFreePhysicalMemorySize()}, {@link #getTotalPhysicalMemorySize()}, * {@link #getFreeSwapSpaceSize()}, and {@link #getTotalSwapSpaceSize()} can return a * negative value. Because of this, we prevent those methods from returning negative values, * returning 0 instead. - * + *
<p>
              * The OS can report a negative number in a number of cases: * - Non-supported OSes (HP-UX, or AIX) * - A failure of macOS to initialize host statistics @@ -183,11 +183,11 @@ public long getTotalSwapSpaceSize() { /** * The system load averages as an array. - * + *
<p>
              * On Windows, this method returns {@code null}. - * + *
<p>
              * On Linux, this method returns the 1, 5, and 15-minute load averages. - * + *
<p>
              * On macOS, this method should return the 1-minute load average. * * @return the available system load averages or {@code null} diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 85fccacb11ed5..96fdadb588efe 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -46,6 +46,7 @@ import org.opensearch.action.admin.cluster.snapshots.status.TransportNodesSnapshotsStatus; import org.opensearch.action.search.SearchExecutionStatsCollector; import org.opensearch.action.search.SearchPhaseController; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.action.search.SearchTransportService; import org.opensearch.action.support.TransportAction; import org.opensearch.action.update.UpdateHelper; @@ -61,6 +62,7 @@ import org.opensearch.cluster.InternalClusterInfoService; import org.opensearch.cluster.NodeConnectionsService; import org.opensearch.cluster.action.index.MappingUpdatedAction; +import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.metadata.AliasValidator; import org.opensearch.cluster.metadata.IndexTemplateMetadata; import org.opensearch.cluster.metadata.Metadata; @@ -114,6 +116,7 @@ import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.crypto.CryptoHandlerRegistry; import org.opensearch.discovery.Discovery; import org.opensearch.discovery.DiscoveryModule; import org.opensearch.env.Environment; @@ -127,14 +130,17 @@ import org.opensearch.gateway.GatewayService; import org.opensearch.gateway.MetaStateService; import org.opensearch.gateway.PersistedClusterStateService; +import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.http.HttpServerTransport; import org.opensearch.identity.IdentityService; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexingPressureService; +import org.opensearch.index.SegmentReplicationStatsTracker; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.engine.EngineFactory; import org.opensearch.index.recovery.RemoteStoreRestoreService; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.filecache.FileCacheCleaner; @@ -161,6 +167,8 @@ import org.opensearch.monitor.fs.FsInfo; import org.opensearch.monitor.fs.FsProbe; import org.opensearch.monitor.jvm.JvmInfo; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.node.resource.tracker.NodeResourceUsageTracker; import org.opensearch.persistent.PersistentTasksClusterService; import org.opensearch.persistent.PersistentTasksExecutor; import org.opensearch.persistent.PersistentTasksExecutorRegistry; @@ -169,6 +177,8 @@ import org.opensearch.plugins.AnalysisPlugin; import org.opensearch.plugins.CircuitBreakerPlugin; import org.opensearch.plugins.ClusterPlugin; +import org.opensearch.plugins.CryptoKeyProviderPlugin; +import org.opensearch.plugins.CryptoPlugin; import org.opensearch.plugins.DiscoveryPlugin; import org.opensearch.plugins.EnginePlugin; import org.opensearch.plugins.ExtensionAwarePlugin; @@ 
-194,7 +204,6 @@ import org.opensearch.script.ScriptEngine; import org.opensearch.script.ScriptModule; import org.opensearch.script.ScriptService; -import org.opensearch.search.SearchBootstrapSettings; import org.opensearch.search.SearchModule; import org.opensearch.search.SearchService; import org.opensearch.search.aggregations.support.AggregationUsageService; @@ -217,6 +226,9 @@ import org.opensearch.tasks.consumer.TopNSearchTasksLogger; import org.opensearch.telemetry.TelemetryModule; import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.MetricsRegistryFactory; +import org.opensearch.telemetry.metrics.NoopMetricsRegistryFactory; import org.opensearch.telemetry.tracing.NoopTracerFactory; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.telemetry.tracing.TracerFactory; @@ -265,6 +277,8 @@ import static org.opensearch.common.util.FeatureFlags.TELEMETRY; import static org.opensearch.env.NodeEnvironment.collectFileCacheDataPath; import static org.opensearch.index.ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENABLED_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreAttributePresent; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; /** * A node represent a node within a cluster ({@code cluster.name}). The {@link #client()} can be used @@ -383,9 +397,12 @@ public static class DiscoverySettings { private final LocalNodeFactory localNodeFactory; private final NodeService nodeService; private final Tracer tracer; + + private final MetricsRegistry metricsRegistry; final NamedWriteableRegistry namedWriteableRegistry; private final AtomicReference runnableTaskListener; private FileCache fileCache; + private final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; public Node(Environment environment) { this(environment, Collections.emptyList(), true); @@ -469,7 +486,6 @@ protected Node( // Ensure to initialize Feature Flags via the settings from opensearch.yml FeatureFlags.initializeFeatureFlags(settings); - SearchBootstrapSettings.initialize(settings); final List identityPlugins = new ArrayList<>(); if (FeatureFlags.isEnabled(FeatureFlags.IDENTITY)) { @@ -518,12 +534,15 @@ protected Node( .collect(Collectors.toCollection(LinkedHashSet::new)) ); resourcesToClose.add(nodeEnvironment); - localNodeFactory = new LocalNodeFactory(settings, nodeEnvironment.nodeId()); final List> executorBuilders = pluginsService.getExecutorBuilders(settings); runnableTaskListener = new AtomicReference<>(); final ThreadPool threadPool = new ThreadPool(settings, runnableTaskListener, executorBuilders.toArray(new ExecutorBuilder[0])); + + final SetOnce repositoriesServiceReference = new SetOnce<>(); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService(repositoriesServiceReference::get, threadPool); + localNodeFactory = new LocalNodeFactory(settings, nodeEnvironment.nodeId(), remoteStoreNodeService); resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); resourcesToClose.add(resourceWatcherService); @@ -577,6 +596,37 @@ protected Node( new ConsistentSettingsService(settings, clusterService, consistentSettings).newHashPublisher() ); } + + TracerFactory tracerFactory; + MetricsRegistryFactory metricsRegistryFactory; + if 
(FeatureFlags.isEnabled(TELEMETRY)) { + final TelemetrySettings telemetrySettings = new TelemetrySettings(settings, clusterService.getClusterSettings()); + if (telemetrySettings.isTracingFeatureEnabled() || telemetrySettings.isMetricsFeatureEnabled()) { + List telemetryPlugins = pluginsService.filterPlugins(TelemetryPlugin.class); + TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, telemetrySettings); + if (telemetrySettings.isTracingFeatureEnabled()) { + tracerFactory = new TracerFactory(telemetrySettings, telemetryModule.getTelemetry(), threadPool.getThreadContext()); + } else { + tracerFactory = new NoopTracerFactory(); + } + if (telemetrySettings.isMetricsFeatureEnabled()) { + metricsRegistryFactory = new MetricsRegistryFactory(telemetrySettings, telemetryModule.getTelemetry()); + } else { + metricsRegistryFactory = new NoopMetricsRegistryFactory(); + } + } else { + tracerFactory = new NoopTracerFactory(); + metricsRegistryFactory = new NoopMetricsRegistryFactory(); + } + } else { + tracerFactory = new NoopTracerFactory(); + metricsRegistryFactory = new NoopMetricsRegistryFactory(); + } + + tracer = tracerFactory.getTracer(); + metricsRegistry = metricsRegistryFactory.getMetricsRegistry(); + resourcesToClose.add(tracer::close); + resourcesToClose.add(metricsRegistry::close); final IngestService ingestService = new IngestService( clusterService, threadPool, @@ -587,7 +637,6 @@ protected Node( client ); - final SetOnce repositoriesServiceReference = new SetOnce<>(); final ClusterInfoService clusterInfoService = newClusterInfoService(settings, clusterService, threadPool, client); final UsageService usageService = new UsageService(); @@ -671,6 +720,19 @@ protected Node( clusterService.getClusterSettings(), threadPool::relativeTimeInMillis ); + final RemoteClusterStateService remoteClusterStateService; + if (isRemoteStoreClusterStateEnabled(settings)) { + remoteClusterStateService = new RemoteClusterStateService( + nodeEnvironment.nodeId(), + repositoriesServiceReference::get, + settings, + clusterService.getClusterSettings(), + threadPool::preciseRelativeTimeInNanos, + threadPool + ); + } else { + remoteClusterStateService = null; + } // collect engine factory providers from plugins final Collection enginePlugins = pluginsService.filterPlugins(EnginePlugin.class); @@ -720,11 +782,16 @@ protected Node( rerouteServiceReference.set(rerouteService); clusterService.setRerouteService(rerouteService); + final RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); + final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( repositoriesServiceReference::get, threadPool ); + final SearchRequestStats searchRequestStats = new SearchRequestStats(); + + remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, settings); final IndicesService indicesService = new IndicesService( settings, pluginsService, @@ -748,9 +815,11 @@ protected Node( recoveryStateFactories, remoteDirectoryFactory, repositoriesServiceReference::get, - fileCacheCleaner + fileCacheCleaner, + searchRequestStats, + remoteStoreStatsTrackerFactory, + recoverySettings ); - final AliasValidator aliasValidator = new AliasValidator(); final ShardLimitValidator shardLimitValidator = new ShardLimitValidator(settings, clusterService, systemIndices); @@ -833,7 +902,8 @@ protected Node( xContentRegistry, networkService, restController, - clusterService.getClusterSettings() + 
clusterService.getClusterSettings(), + tracer ); Collection>> indexTemplateMetadataUpgraders = pluginsService.filterPlugins( Plugin.class @@ -863,7 +933,8 @@ protected Node( networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings(), - taskHeaders + taskHeaders, + tracer ); TopNSearchTasksLogger taskConsumer = new TopNSearchTasksLogger(settings, settingsModule.getClusterSettings()); transportService.getTaskManager().registerTaskResourceConsumer(taskConsumer); @@ -875,6 +946,7 @@ protected Node( environment.settings(), client ); + final PersistedStateRegistry persistedStateRegistry = new PersistedStateRegistry(); final GatewayMetaState gatewayMetaState = new GatewayMetaState(); final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); final SearchTransportService searchTransportService = new SearchTransportService( @@ -905,7 +977,7 @@ protected Node( transportService.getTaskManager() ); - final RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); + final SegmentReplicationStatsTracker segmentReplicationStatsTracker = new SegmentReplicationStatsTracker(indicesService); RepositoriesModule repositoriesModule = new RepositoriesModule( this.environment, pluginsService.filterPlugins(RepositoryPlugin.class), @@ -915,6 +987,11 @@ protected Node( xContentRegistry, recoverySettings ); + CryptoHandlerRegistry.initRegistry( + pluginsService.filterPlugins(CryptoPlugin.class), + pluginsService.filterPlugins(CryptoKeyProviderPlugin.class), + settings + ); RepositoriesService repositoryService = repositoriesModule.getRepositoryService(); repositoriesServiceReference.set(repositoryService); SnapshotsService snapshotsService = new SnapshotsService( @@ -949,9 +1026,14 @@ protected Node( indicesService, clusterInfoService::getClusterInfo ); + RemoteStoreRestoreService remoteStoreRestoreService = new RemoteStoreRestoreService( clusterService, - clusterModule.getAllocationService() + clusterModule.getAllocationService(), + metadataCreateIndexService, + metadataIndexUpgradeService, + shardLimitValidator, + remoteClusterStateService ); final DiskThresholdMonitor diskThresholdMonitor = new DiskThresholdMonitor( @@ -978,7 +1060,9 @@ protected Node( environment.configFile(), gatewayMetaState, rerouteService, - fsHealthService + fsHealthService, + persistedStateRegistry, + remoteStoreNodeService ); final SearchPipelineService searchPipelineService = new SearchPipelineService( clusterService, @@ -1000,6 +1084,16 @@ protected Node( transportService.getTaskManager(), taskCancellationMonitoringSettings ); + final NodeResourceUsageTracker nodeResourceUsageTracker = new NodeResourceUsageTracker( + threadPool, + settings, + clusterService.getClusterSettings() + ); + final ResourceUsageCollectorService resourceUsageCollectorService = new ResourceUsageCollectorService( + nodeResourceUsageTracker, + clusterService, + threadPool + ); this.nodeService = new NodeService( settings, threadPool, @@ -1021,7 +1115,10 @@ protected Node( searchBackpressureService, searchPipelineService, fileCache, - taskCancellationMonitoringService + taskCancellationMonitoringService, + resourceUsageCollectorService, + segmentReplicationStatsTracker, + repositoryService ); final SearchService searchService = newSearchService( @@ -1037,18 +1134,6 @@ protected Node( searchModule.getIndexSearcherExecutor(threadPool) ); - TracerFactory tracerFactory; - if (FeatureFlags.isEnabled(TELEMETRY)) { - final TelemetrySettings 
telemetrySettings = new TelemetrySettings(settings, clusterService.getClusterSettings()); - List telemetryPlugins = pluginsService.filterPlugins(TelemetryPlugin.class); - TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, telemetrySettings); - tracerFactory = new TracerFactory(telemetrySettings, telemetryModule.getTelemetry(), threadPool.getThreadContext()); - } else { - tracerFactory = new NoopTracerFactory(); - } - tracer = tracerFactory.getTracer(); - resourcesToClose.add(tracer::close); - final List> tasksExecutors = pluginsService.filterPlugins(PersistentTaskPlugin.class) .stream() .map( @@ -1102,6 +1187,7 @@ protected Node( b.bind(MetaStateService.class).toInstance(metaStateService); b.bind(PersistedClusterStateService.class).toInstance(lucenePersistedStateFactory); b.bind(IndicesService.class).toInstance(indicesService); + b.bind(RemoteStoreStatsTrackerFactory.class).toInstance(remoteStoreStatsTrackerFactory); b.bind(AliasValidator.class).toInstance(aliasValidator); b.bind(MetadataCreateIndexService.class).toInstance(metadataCreateIndexService); b.bind(AwarenessReplicaBalance.class).toInstance(awarenessReplicaBalance); @@ -1153,9 +1239,16 @@ protected Node( b.bind(RerouteService.class).toInstance(rerouteService); b.bind(ShardLimitValidator.class).toInstance(shardLimitValidator); b.bind(FsHealthService.class).toInstance(fsHealthService); + b.bind(NodeResourceUsageTracker.class).toInstance(nodeResourceUsageTracker); + b.bind(ResourceUsageCollectorService.class).toInstance(resourceUsageCollectorService); b.bind(SystemIndices.class).toInstance(systemIndices); b.bind(IdentityService.class).toInstance(identityService); b.bind(Tracer.class).toInstance(tracer); + b.bind(SearchRequestStats.class).toInstance(searchRequestStats); + b.bind(MetricsRegistry.class).toInstance(metricsRegistry); + b.bind(RemoteClusterStateService.class).toProvider(() -> remoteClusterStateService); + b.bind(PersistedStateRegistry.class).toInstance(persistedStateRegistry); + b.bind(SegmentReplicationStatsTracker.class).toInstance(segmentReplicationStatsTracker); }); injector = modules.createInjector(); @@ -1208,9 +1301,10 @@ protected TransportService newTransportService( TransportInterceptor interceptor, Function localNodeFactory, ClusterSettings clusterSettings, - Set taskHeaders + Set taskHeaders, + Tracer tracer ) { - return new TransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders); + return new TransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, tracer); } protected void processRecoverySettings(ClusterSettings clusterSettings, RecoverySettings recoverySettings) { @@ -1264,6 +1358,8 @@ public Node start() throws NodeValidationException { injector.getInstance(RepositoriesService.class).start(); injector.getInstance(SearchService.class).start(); injector.getInstance(FsHealthService.class).start(); + injector.getInstance(NodeResourceUsageTracker.class).start(); + injector.getInstance(ResourceUsageCollectorService.class).start(); nodeService.getMonitorService().start(); nodeService.getSearchBackpressureService().start(); nodeService.getTaskCancellationMonitoringService().start(); @@ -1294,6 +1390,10 @@ public Node start() throws NodeValidationException { injector.getInstance(PeerRecoverySourceService.class).start(); injector.getInstance(SegmentReplicationSourceService.class).start(); + final RemoteClusterStateService remoteClusterStateService = 
injector.getInstance(RemoteClusterStateService.class); + if (remoteClusterStateService != null) { + remoteClusterStateService.start(); + } // Load (and maybe upgrade) the metadata stored on disk final GatewayMetaState gatewayMetaState = injector.getInstance(GatewayMetaState.class); gatewayMetaState.start( @@ -1303,7 +1403,10 @@ public Node start() throws NodeValidationException { injector.getInstance(MetaStateService.class), injector.getInstance(MetadataIndexUpgradeService.class), injector.getInstance(MetadataUpgrader.class), - injector.getInstance(PersistedClusterStateService.class) + injector.getInstance(PersistedClusterStateService.class), + injector.getInstance(RemoteClusterStateService.class), + injector.getInstance(PersistedStateRegistry.class), + injector.getInstance(RemoteStoreRestoreService.class) ); if (Assertions.ENABLED) { try { @@ -1419,6 +1522,8 @@ private Node stop() { injector.getInstance(ClusterService.class).stop(); injector.getInstance(NodeConnectionsService.class).stop(); injector.getInstance(FsHealthService.class).stop(); + injector.getInstance(NodeResourceUsageTracker.class).stop(); + injector.getInstance(ResourceUsageCollectorService.class).stop(); nodeService.getMonitorService().stop(); nodeService.getSearchBackpressureService().stop(); injector.getInstance(GatewayService.class).stop(); @@ -1482,6 +1587,10 @@ public synchronized void close() throws IOException { toClose.add(nodeService.getSearchBackpressureService()); toClose.add(() -> stopWatch.stop().start("fsHealth")); toClose.add(injector.getInstance(FsHealthService.class)); + toClose.add(() -> stopWatch.stop().start("resource_usage_tracker")); + toClose.add(injector.getInstance(NodeResourceUsageTracker.class)); + toClose.add(() -> stopWatch.stop().start("resource_usage_collector")); + toClose.add(injector.getInstance(ResourceUsageCollectorService.class)); toClose.add(() -> stopWatch.stop().start("gateway")); toClose.add(injector.getInstance(GatewayService.class)); toClose.add(() -> stopWatch.stop().start("search")); @@ -1513,6 +1622,7 @@ public synchronized void close() throws IOException { toClose.add(stopWatch::stop); if (FeatureFlags.isEnabled(TELEMETRY)) { toClose.add(injector.getInstance(Tracer.class)); + toClose.add(injector.getInstance(MetricsRegistry.class)); } if (logger.isTraceEnabled()) { @@ -1712,15 +1822,28 @@ private static class LocalNodeFactory implements Function localNode = new SetOnce<>(); private final String persistentNodeId; private final Settings settings; + private final RemoteStoreNodeService remoteStoreNodeService; - private LocalNodeFactory(Settings settings, String persistentNodeId) { + private LocalNodeFactory(Settings settings, String persistentNodeId, RemoteStoreNodeService remoteStoreNodeService) { this.persistentNodeId = persistentNodeId; this.settings = settings; + this.remoteStoreNodeService = remoteStoreNodeService; } @Override public DiscoveryNode apply(BoundTransportAddress boundTransportAddress) { - localNode.set(DiscoveryNode.createLocal(settings, boundTransportAddress.publishAddress(), persistentNodeId)); + if (isRemoteStoreAttributePresent(settings)) { + localNode.set( + DiscoveryNode.createRemoteNodeLocal( + settings, + boundTransportAddress.publishAddress(), + persistentNodeId, + remoteStoreNodeService + ) + ); + } else { + localNode.set(DiscoveryNode.createLocal(settings, boundTransportAddress.publishAddress(), persistentNodeId)); + } return localNode.get(); } diff --git a/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java 
b/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java new file mode 100644 index 0000000000000..6ef66d4ac1914 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/NodeResourceUsageStats.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Locale; + +/** + * This represents the resource usage stats of a node along with the timestamp at which the stats object was created + * in the respective node + */ +public class NodeResourceUsageStats implements Writeable { + final String nodeId; + long timestamp; + double cpuUtilizationPercent; + double memoryUtilizationPercent; + + public NodeResourceUsageStats(String nodeId, long timestamp, double memoryUtilizationPercent, double cpuUtilizationPercent) { + this.nodeId = nodeId; + this.timestamp = timestamp; + this.cpuUtilizationPercent = cpuUtilizationPercent; + this.memoryUtilizationPercent = memoryUtilizationPercent; + } + + public NodeResourceUsageStats(StreamInput in) throws IOException { + this.nodeId = in.readString(); + this.timestamp = in.readLong(); + this.cpuUtilizationPercent = in.readDouble(); + this.memoryUtilizationPercent = in.readDouble(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(this.nodeId); + out.writeLong(this.timestamp); + out.writeDouble(this.cpuUtilizationPercent); + out.writeDouble(this.memoryUtilizationPercent); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("NodeResourceUsageStats["); + sb.append(nodeId).append("]("); + sb.append("Timestamp: ").append(timestamp); + sb.append(", CPU utilization percent: ").append(String.format(Locale.ROOT, "%.1f", cpuUtilizationPercent)); + sb.append(", Memory utilization percent: ").append(String.format(Locale.ROOT, "%.1f", memoryUtilizationPercent)); + sb.append(")"); + return sb.toString(); + } + + NodeResourceUsageStats(NodeResourceUsageStats nodeResourceUsageStats) { + this( + nodeResourceUsageStats.nodeId, + nodeResourceUsageStats.timestamp, + nodeResourceUsageStats.memoryUtilizationPercent, + nodeResourceUsageStats.cpuUtilizationPercent + ); + } + + public double getMemoryUtilizationPercent() { + return memoryUtilizationPercent; + } + + public double getCpuUtilizationPercent() { + return cpuUtilizationPercent; + } + + public long getTimestamp() { + return timestamp; + } +} diff --git a/server/src/main/java/org/opensearch/node/NodeService.java b/server/src/main/java/org/opensearch/node/NodeService.java index 2688b894cb9a7..325683b80394a 100644 --- a/server/src/main/java/org/opensearch/node/NodeService.java +++ b/server/src/main/java/org/opensearch/node/NodeService.java @@ -48,11 +48,13 @@ import org.opensearch.discovery.Discovery; import org.opensearch.http.HttpServerTransport; import org.opensearch.index.IndexingPressureService; +import org.opensearch.index.SegmentReplicationStatsTracker; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.indices.IndicesService; import org.opensearch.ingest.IngestService; import org.opensearch.monitor.MonitorService; import org.opensearch.plugins.PluginsService; +import 
org.opensearch.repositories.RepositoriesService; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.AggregationUsageService; import org.opensearch.search.backpressure.SearchBackpressureService; @@ -83,6 +85,7 @@ public class NodeService implements Closeable { private final ScriptService scriptService; private final HttpServerTransport httpServerTransport; private final ResponseCollectorService responseCollectorService; + private final ResourceUsageCollectorService resourceUsageCollectorService; private final SearchTransportService searchTransportService; private final IndexingPressureService indexingPressureService; private final AggregationUsageService aggregationUsageService; @@ -92,6 +95,8 @@ public class NodeService implements Closeable { private final Discovery discovery; private final FileCache fileCache; private final TaskCancellationMonitoringService taskCancellationMonitoringService; + private final RepositoriesService repositoriesService; + private final SegmentReplicationStatsTracker segmentReplicationStatsTracker; NodeService( Settings settings, @@ -114,7 +119,10 @@ public class NodeService implements Closeable { SearchBackpressureService searchBackpressureService, SearchPipelineService searchPipelineService, FileCache fileCache, - TaskCancellationMonitoringService taskCancellationMonitoringService + TaskCancellationMonitoringService taskCancellationMonitoringService, + ResourceUsageCollectorService resourceUsageCollectorService, + SegmentReplicationStatsTracker segmentReplicationStatsTracker, + RepositoriesService repositoriesService ) { this.settings = settings; this.threadPool = threadPool; @@ -137,8 +145,11 @@ public class NodeService implements Closeable { this.clusterService = clusterService; this.fileCache = fileCache; this.taskCancellationMonitoringService = taskCancellationMonitoringService; + this.resourceUsageCollectorService = resourceUsageCollectorService; + this.repositoriesService = repositoriesService; clusterService.addStateApplier(ingestService); clusterService.addStateApplier(searchPipelineService); + this.segmentReplicationStatsTracker = segmentReplicationStatsTracker; } public NodeInfo info( @@ -217,7 +228,10 @@ public NodeStats stats( boolean weightedRoutingStats, boolean fileCacheStats, boolean taskCancellation, - boolean searchPipelineStats + boolean searchPipelineStats, + boolean resourceUsageStats, + boolean segmentReplicationTrackerStats, + boolean repositoriesStats ) { // for indices stats we want to include previous allocated shards stats as well (it will // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) @@ -237,6 +251,7 @@ public NodeStats stats( discoveryStats ? discovery.stats() : null, ingest ? ingestService.stats() : null, adaptiveSelection ? responseCollectorService.getAdaptiveStats(searchTransportService.getPendingSearchRequests()) : null, + resourceUsageStats ? resourceUsageCollectorService.stats() : null, scriptCache ? scriptService.cacheStats() : null, indexingPressure ? this.indexingPressureService.nodeStats() : null, shardIndexingPressure ? this.indexingPressureService.shardStats(indices) : null, @@ -245,7 +260,9 @@ public NodeStats stats( weightedRoutingStats ? WeightedRoutingStats.getInstance() : null, fileCacheStats && fileCache != null ? fileCache.fileCacheStats() : null, taskCancellation ? this.taskCancellationMonitoringService.stats() : null, - searchPipelineStats ? this.searchPipelineService.stats() : null + searchPipelineStats ? 
this.searchPipelineService.stats() : null, + segmentReplicationTrackerStats ? this.segmentReplicationStatsTracker.getTotalRejectionStats() : null, + repositoriesStats ? this.repositoriesService.getRepositoriesStats() : null ); }
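The NodesResourceUsageStats class added in the next hunk serializes its per-node map with the readMap/writeMap convention: a size header followed by one key/value pair per entry. A rough, self-contained analogue of that layout (DataInput/DataOutput stand in for StreamInput/StreamOutput, and a plain Long stands in for the per-node stats object):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

class NodeStatsMapCodec {
    // Size header first, then one (key, value) pair per entry.
    static void write(DataOutput out, Map<String, Long> stats) throws IOException {
        out.writeInt(stats.size());
        for (Map.Entry<String, Long> e : stats.entrySet()) {
            out.writeUTF(e.getKey());    // node id
            out.writeLong(e.getValue()); // per-node payload
        }
    }

    static Map<String, Long> read(DataInput in) throws IOException {
        int size = in.readInt();
        Map<String, Long> stats = new HashMap<>(size);
        for (int i = 0; i < size; i++) {
            stats.put(in.readUTF(), in.readLong());
        }
        return stats;
    }
}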
diff --git a/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java b/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java new file mode 100644 index 0000000000000..3dff9a27f71a8 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/NodesResourceUsageStats.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Locale; +import java.util.Map; + +/** + * This class represents the resource usage stats, such as CPU, memory, and IO usage, of each node along with the + * timestamp at which the stats were recorded. + */ +public class NodesResourceUsageStats implements Writeable, ToXContentFragment { + + // Map of node id to resource usage stats of the corresponding node. + private final Map<String, NodeResourceUsageStats> nodeIdToResourceUsageStatsMap; + + public NodesResourceUsageStats(Map<String, NodeResourceUsageStats> nodeIdToResourceUsageStatsMap) { + this.nodeIdToResourceUsageStatsMap = nodeIdToResourceUsageStatsMap; + } + + public NodesResourceUsageStats(StreamInput in) throws IOException { + this.nodeIdToResourceUsageStatsMap = in.readMap(StreamInput::readString, NodeResourceUsageStats::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(this.nodeIdToResourceUsageStatsMap, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream)); + } + + /** + * Returns the map of node id to resource usage stats of the corresponding node. + */ + public Map<String, NodeResourceUsageStats> getNodeIdToResourceUsageStatsMap() { + return nodeIdToResourceUsageStatsMap; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("resource_usage_stats"); + for (String nodeId : nodeIdToResourceUsageStatsMap.keySet()) { + builder.startObject(nodeId); + NodeResourceUsageStats resourceUsageStats = nodeIdToResourceUsageStatsMap.get(nodeId); + if (resourceUsageStats != null) { + builder.field("timestamp", resourceUsageStats.timestamp); + builder.field("cpu_utilization_percent", String.format(Locale.ROOT, "%.1f", resourceUsageStats.cpuUtilizationPercent)); + builder.field( + "memory_utilization_percent", + String.format(Locale.ROOT, "%.1f", resourceUsageStats.memoryUtilizationPercent) + ); + } + builder.endObject(); + } + builder.endObject(); + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java b/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java new file mode 100644 index 0000000000000..f1c763e09f147 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/ResourceUsageCollectorService.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterStateListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.node.resource.tracker.NodeResourceUsageTracker; +import org.opensearch.threadpool.Scheduler; +import org.opensearch.threadpool.ThreadPool; + +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ConcurrentMap; + +/** + * This collects node-level resource usage statistics, such as CPU, memory, and IO usage, of each node and makes them + * available to the coordinator node to aid in throttling, ranking, etc. + */ +public class ResourceUsageCollectorService extends AbstractLifecycleComponent implements ClusterStateListener { + + /** + * This refresh interval denotes the polling interval of ResourceUsageCollectorService to refresh the resource usage + * stats from the local node + */ + private static final long REFRESH_INTERVAL_IN_MILLIS = 1000; + + private static final Logger logger = LogManager.getLogger(ResourceUsageCollectorService.class); + private final ConcurrentMap<String, NodeResourceUsageStats> nodeIdToResourceUsageStats = ConcurrentCollections.newConcurrentMap(); + + private ThreadPool threadPool; + private volatile Scheduler.Cancellable scheduledFuture; + + private NodeResourceUsageTracker nodeResourceUsageTracker; + private ClusterService clusterService; + + public ResourceUsageCollectorService( + NodeResourceUsageTracker nodeResourceUsageTracker, + ClusterService clusterService, + ThreadPool threadPool + ) { + this.threadPool = threadPool; + this.nodeResourceUsageTracker = nodeResourceUsageTracker; + this.clusterService = clusterService; + clusterService.addListener(this); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + if
(event.nodesRemoved()) { + for (DiscoveryNode removedNode : event.nodesDelta().removedNodes()) { + removeNodeResourceUsageStats(removedNode.getId()); + } + } + } + + void removeNodeResourceUsageStats(String nodeId) { + nodeIdToResourceUsageStats.remove(nodeId); + } + + /** + * Collect node resource usage stats along with the timestamp + */ + public void collectNodeResourceUsageStats( + String nodeId, + long timestamp, + double memoryUtilizationPercent, + double cpuUtilizationPercent + ) { + nodeIdToResourceUsageStats.compute(nodeId, (id, resourceUsageStats) -> { + if (resourceUsageStats == null) { + return new NodeResourceUsageStats(nodeId, timestamp, memoryUtilizationPercent, cpuUtilizationPercent); + } else { + resourceUsageStats.cpuUtilizationPercent = cpuUtilizationPercent; + resourceUsageStats.memoryUtilizationPercent = memoryUtilizationPercent; + resourceUsageStats.timestamp = timestamp; + return resourceUsageStats; + } + }); + } + + /** + * Get all node resource usage statistics which will be used for node stats + */ + public Map getAllNodeStatistics() { + Map nodeStats = new HashMap<>(nodeIdToResourceUsageStats.size()); + nodeIdToResourceUsageStats.forEach((nodeId, resourceUsageStats) -> { + nodeStats.put(nodeId, new NodeResourceUsageStats(resourceUsageStats)); + }); + return nodeStats; + } + + /** + * Optionally return a {@code NodeResourceUsageStats} for the given nodeid, if + * resource usage stats information exists for the given node. Returns an empty + * {@code Optional} if the node was not found. + */ + public Optional getNodeStatistics(final String nodeId) { + return Optional.ofNullable(nodeIdToResourceUsageStats.get(nodeId)) + .map(resourceUsageStats -> new NodeResourceUsageStats(resourceUsageStats)); + } + + /** + * Returns collected resource usage statistics of all nodes + */ + public NodesResourceUsageStats stats() { + return new NodesResourceUsageStats(getAllNodeStatistics()); + } + + /** + * Fetch local node resource usage statistics and add it to store along with the current timestamp + */ + private void collectLocalNodeResourceUsageStats() { + if (nodeResourceUsageTracker.isReady() && clusterService.state() != null) { + collectNodeResourceUsageStats( + clusterService.state().nodes().getLocalNodeId(), + System.currentTimeMillis(), + nodeResourceUsageTracker.getMemoryUtilizationPercent(), + nodeResourceUsageTracker.getCpuUtilizationPercent() + ); + } + } + + @Override + protected void doStart() { + /** + * Fetch local node resource usage statistics every second + */ + scheduledFuture = threadPool.scheduleWithFixedDelay(() -> { + try { + collectLocalNodeResourceUsageStats(); + } catch (Exception e) { + logger.warn("failure in ResourceUsageCollectorService", e); + } + }, new TimeValue(REFRESH_INTERVAL_IN_MILLIS), ThreadPool.Names.GENERIC); + } + + @Override + protected void doStop() { + if (scheduledFuture != null) { + scheduledFuture.cancel(); + } + } + + @Override + protected void doClose() {} +} diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java new file mode 100644 index 0000000000000..7b2a6c34d3db6 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java @@ -0,0 +1,192 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
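collectNodeResourceUsageStats above uses ConcurrentMap.compute for an atomic insert-or-update per node id, so the periodic poller and the cluster-change cleanup can run concurrently without losing writes. A stripped-down sketch of that upsert shape (the double[] payload is a stand-in for the real stats object):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class UsageStore {
    private final ConcurrentMap<String, double[]> stats = new ConcurrentHashMap<>();

    // Atomic upsert: create the entry on first sight, mutate it in place afterwards.
    void record(String nodeId, double cpuPercent, double memoryPercent) {
        stats.compute(nodeId, (id, current) -> {
            if (current == null) {
                return new double[] { cpuPercent, memoryPercent };
            }
            current[0] = cpuPercent;
            current[1] = memoryPercent;
            return current;
        });
    }

    void remove(String nodeId) {
        stats.remove(nodeId); // mirrors the cleanup when a node leaves the cluster
    }
}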
+ */ + +package org.opensearch.node.remotestore; + +import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.settings.Settings; +import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.node.Node; +import org.opensearch.repositories.blobstore.BlobStoreRepository; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * This is an abstraction for validating and storing information specific to remote backed storage nodes. + * + * @opensearch.internal + */ +public class RemoteStoreNodeAttribute { + + public static final String REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX = "remote_store"; + public static final String REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.segment.repository"; + public static final String REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.translog.repository"; + public static final String REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.state.repository"; + public static final String REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT = "remote_store.repository.%s.type"; + public static final String REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT = "remote_store.repository.%s." + + CryptoMetadata.CRYPTO_METADATA_KEY; + public static final String REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX = REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT + + "." + + CryptoMetadata.SETTINGS_KEY; + public static final String REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX = "remote_store.repository.%s.settings."; + private final RepositoriesMetadata repositoriesMetadata; + + /** + * Creates a new {@link RemoteStoreNodeAttribute} + */ + public RemoteStoreNodeAttribute(DiscoveryNode node) { + this.repositoriesMetadata = buildRepositoriesMetadata(node); + } + + private String validateAttributeNonNull(DiscoveryNode node, String attributeKey) { + String attributeValue = node.getAttributes().get(attributeKey); + if (attributeValue == null || attributeValue.isEmpty()) { + throw new IllegalStateException("joining node [" + node + "] doesn't have the node attribute [" + attributeKey + "]"); + } + + return attributeValue; + } + + private CryptoMetadata buildCryptoMetadata(DiscoveryNode node, String repositoryName) { + String metadataKey = String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT, repositoryName); + boolean isRepoEncrypted = node.getAttributes().keySet().stream().anyMatch(key -> key.startsWith(metadataKey)); + if (isRepoEncrypted == false) { + return null; + } + + String keyProviderName = validateAttributeNonNull(node, metadataKey + "." + CryptoMetadata.KEY_PROVIDER_NAME_KEY); + String keyProviderType = validateAttributeNonNull(node, metadataKey + "." 
+ CryptoMetadata.KEY_PROVIDER_TYPE_KEY); + + String settingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX, + repositoryName + ); + + Map settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix + ".", ""), key -> node.getAttributes().get(key))); + + Settings.Builder settings = Settings.builder(); + settingsMap.forEach(settings::put); + + return new CryptoMetadata(keyProviderName, keyProviderType, settings.build()); + } + + private Map validateSettingsAttributesNonNull(DiscoveryNode node, String repositoryName) { + String settingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + repositoryName + ); + Map settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> validateAttributeNonNull(node, key))); + + if (settingsMap.isEmpty()) { + throw new IllegalStateException( + "joining node [" + node + "] doesn't have settings attribute for [" + repositoryName + "] repository" + ); + } + + return settingsMap; + } + + private RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name) { + String type = validateAttributeNonNull( + node, + String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, name) + ); + Map settingsMap = validateSettingsAttributesNonNull(node, name); + + Settings.Builder settings = Settings.builder(); + settingsMap.forEach(settings::put); + + CryptoMetadata cryptoMetadata = buildCryptoMetadata(node, name); + + // Repository metadata built here will always be for a system repository. + settings.put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true); + + return new RepositoryMetadata(name, type, settings.build(), cryptoMetadata); + } + + private RepositoriesMetadata buildRepositoriesMetadata(DiscoveryNode node) { + List repositoryMetadataList = new ArrayList<>(); + Set repositoryNames = new HashSet<>(); + + repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY)); + repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY)); + repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY)); + + for (String repositoryName : repositoryNames) { + repositoryMetadataList.add(buildRepositoryMetadata(node, repositoryName)); + } + + return new RepositoriesMetadata(repositoryMetadataList); + } + + public static boolean isRemoteStoreAttributePresent(Settings settings) { + return settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX).isEmpty() == false; + } + + public static boolean isRemoteStoreClusterStateEnabled(Settings settings) { + return RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.get(settings) && isRemoteStoreAttributePresent(settings); + } + + public RepositoriesMetadata getRepositoriesMetadata() { + return this.repositoriesMetadata; + } + + @Override + public int hashCode() { + // The hashCode is generated by computing the hash of all the repositoryMetadata present in + // repositoriesMetadata without generation. Below is the modified list hashCode generation logic. 
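Both buildCryptoMetadata and validateSettingsAttributesNonNull above recover per-repository settings by filtering the node attributes on a computed prefix and stripping that prefix from each key. A small self-contained illustration of the same extraction (attribute names and values invented for the example):

import java.util.Map;
import java.util.stream.Collectors;

class AttributePrefixExtractor {
    // Collects every attribute under the given prefix into a key -> value map,
    // with the prefix removed from the keys.
    static Map<String, String> settingsFor(Map<String, String> attributes, String prefix) {
        return attributes.entrySet()
            .stream()
            .filter(e -> e.getKey().startsWith(prefix))
            .collect(Collectors.toMap(e -> e.getKey().substring(prefix.length()), Map.Entry::getValue));
    }

    public static void main(String[] args) {
        Map<String, String> attributes = Map.of(
            "remote_store.repository.my-repo.settings.bucket", "segments",
            "remote_store.repository.my-repo.settings.region", "us-east-1",
            "node.name", "node-1"
        );
        // Prints {bucket=segments, region=us-east-1} (iteration order may vary)
        System.out.println(settingsFor(attributes, "remote_store.repository.my-repo.settings."));
    }
}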
+ + int hashCode = 1; + Iterator<RepositoryMetadata> iterator = this.repositoriesMetadata.repositories().iterator(); + while (iterator.hasNext()) { + RepositoryMetadata repositoryMetadata = iterator.next(); + hashCode = 31 * hashCode + (repositoryMetadata == null + ? 0 + : Objects.hash(repositoryMetadata.name(), repositoryMetadata.type(), repositoryMetadata.settings())); + } + return hashCode; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RemoteStoreNodeAttribute that = (RemoteStoreNodeAttribute) o; + + return this.getRepositoriesMetadata().equalsIgnoreGenerations(that.getRepositoriesMetadata()); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('{').append(this.repositoriesMetadata).append('}'); + return sb.toString(); + } +} diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java new file mode 100644 index 0000000000000..ca2413a057a6b --- /dev/null +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java @@ -0,0 +1,169 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.remotestore; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.settings.Setting; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryException; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.function.Supplier; + +/** + * Contains all the methods needed for the lifecycle of a remote store backed node. + */ +public class RemoteStoreNodeService { + + private static final Logger logger = LogManager.getLogger(RemoteStoreNodeService.class); + private final Supplier<RepositoriesService> repositoriesService; + private final ThreadPool threadPool; + public static final Setting<CompatibilityMode> REMOTE_STORE_COMPATIBILITY_MODE_SETTING = new Setting<>( + "remote_store.compatibility_mode", + CompatibilityMode.STRICT.name(), + CompatibilityMode::parseString, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Node join compatibility mode introduced with remote backed storage. + * + * @opensearch.internal + */ + public enum CompatibilityMode { + STRICT("strict"); + + public final String mode; + + CompatibilityMode(String mode) { + this.mode = mode; + } + + public static CompatibilityMode parseString(String compatibilityMode) { + try { + return CompatibilityMode.valueOf(compatibilityMode.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException( + "[" + + compatibilityMode + + "] compatibility mode is not supported. " + "supported modes are [" + Arrays.toString(CompatibilityMode.values()) + "]" ); } } } + + public RemoteStoreNodeService(Supplier<RepositoriesService> repositoriesService, ThreadPool threadPool) { + this.repositoriesService = repositoriesService; + this.threadPool = threadPool; + } + + /** + * Creates a repository during node startup and performs verification by invoking the verify method against the + * repository. This verification happens on the local node to validate that the node is able to connect + * to the repository with the appropriate permissions. + * If the creation or verification fails, this will close all the repositories this method created and throw an + * exception. + */ + public void createAndVerifyRepositories(DiscoveryNode localNode) { + RemoteStoreNodeAttribute nodeAttribute = new RemoteStoreNodeAttribute(localNode); + RepositoriesService reposService = repositoriesService.get(); + Map<String, Repository> repositories = new HashMap<>(); + for (RepositoryMetadata repositoryMetadata : nodeAttribute.getRepositoriesMetadata().repositories()) { + String repositoryName = repositoryMetadata.name(); + Repository repository; + RepositoriesService.validate(repositoryName); + + // Create Repository + repository = reposService.createRepository(repositoryMetadata); + logger.info( + "remote backed storage repository with name [{}] and type [{}] created", + repository.getMetadata().name(), + repository.getMetadata().type() + ); + + // Verify Repository + String verificationToken = repository.startVerification(); + repository.verify(verificationToken, localNode); + repository.endVerification(verificationToken); + logger.info(() -> new ParameterizedMessage("successfully verified [{}] repository", repositoryName)); + repositories.put(repositoryName, repository); + } + // Updating the repositories map in RepositoriesService + reposService.updateRepositoriesMap(repositories); + } + + /** + * Updates repositories metadata in the cluster state if not already present. If repository metadata for a + * repository is already present in the cluster state and differs from the joining remote store backed node's + * repository metadata, an exception will be thrown and the node will not be allowed to join the cluster. + */ + public RepositoriesMetadata updateRepositoriesMetadata(DiscoveryNode joiningNode, RepositoriesMetadata existingRepositories) { + if (joiningNode.isRemoteStoreNode()) { + List<RepositoryMetadata> updatedRepositoryMetadataList = new ArrayList<>(); + List<RepositoryMetadata> newRepositoryMetadataList = new RemoteStoreNodeAttribute(joiningNode).getRepositoriesMetadata() + .repositories(); + + if (existingRepositories == null) { + return new RepositoriesMetadata(newRepositoryMetadataList); + } else { + updatedRepositoryMetadataList.addAll(existingRepositories.repositories()); + } + + for (RepositoryMetadata newRepositoryMetadata : newRepositoryMetadataList) { + boolean repositoryAlreadyPresent = false; + for (RepositoryMetadata existingRepositoryMetadata : existingRepositories.repositories()) { + if (newRepositoryMetadata.name().equals(existingRepositoryMetadata.name())) { + try { + // This will help in handling two scenarios - + // 1. When a fresh cluster is formed and a node tries to join the cluster, the repository + // metadata constructed from the node attributes of the joining node will be validated + // against the repository information provided by existing nodes in cluster state. + // 2.
Non-restricted repository settings can be updated after a system repository is + // created, so if a node drops out we will need to allow it to rejoin even if those + // non-restricted system repository settings are now different. + repositoriesService.get().ensureValidSystemRepositoryUpdate(newRepositoryMetadata, existingRepositoryMetadata); + newRepositoryMetadata = existingRepositoryMetadata; + repositoryAlreadyPresent = true; + break; + } catch (RepositoryException e) { + throw new IllegalStateException( + "new repository metadata [" + + newRepositoryMetadata + + "] supplied by joining node is different from existing repository metadata [" + + existingRepositoryMetadata + + "]." + ); + } + } + } + if (repositoryAlreadyPresent == false) { + updatedRepositoryMetadataList.add(newRepositoryMetadata); + } + } + return new RepositoriesMetadata(updatedRepositoryMetadataList); + } else { + return existingRepositories; + } + } +} diff --git a/server/src/main/java/org/opensearch/node/remotestore/package-info.java b/server/src/main/java/org/opensearch/node/remotestore/package-info.java new file mode 100644 index 0000000000000..e2592aa5fcc29 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/remotestore/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Remote store backed node support: node attributes, lifecycle, and join-time validation. */ +package org.opensearch.node.remotestore; diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java new file mode 100644 index 0000000000000..f83a1b7f9fc05 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/AbstractAverageUsageTracker.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
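updateRepositoriesMetadata above is essentially a merge keyed by repository name: entries already present in the cluster state win, and only repositories with new names contributed by the joining node are appended. A simplified model of that merge, with repository metadata reduced to plain name strings:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class RepositoryListMerge {
    // Existing entries are kept as-is; joining-node entries are appended only
    // when their name is not already registered.
    static List<String> merge(List<String> existingNames, List<String> joiningNames) {
        Set<String> seen = new HashSet<>(existingNames);
        List<String> merged = new ArrayList<>(existingNames);
        for (String name : joiningNames) {
            if (seen.add(name)) {
                merged.add(name);
            }
        }
        return merged;
    }
}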
+ */ + +package org.opensearch.node.resource.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.MovingAverage; +import org.opensearch.threadpool.Scheduler; +import org.opensearch.threadpool.ThreadPool; + +import java.util.concurrent.atomic.AtomicReference; + +/** + * Base class for sliding window resource usage trackers + */ +public abstract class AbstractAverageUsageTracker extends AbstractLifecycleComponent { + private static final Logger LOGGER = LogManager.getLogger(AbstractAverageUsageTracker.class); + + private final ThreadPool threadPool; + private final TimeValue pollingInterval; + private TimeValue windowDuration; + private final AtomicReference observations = new AtomicReference<>(); + + private volatile Scheduler.Cancellable scheduledFuture; + + public AbstractAverageUsageTracker(ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) { + this.threadPool = threadPool; + this.pollingInterval = pollingInterval; + this.windowDuration = windowDuration; + this.setWindowSize(windowDuration); + } + + public abstract long getUsage(); + + /** + * Returns the moving average of the datapoints + */ + public double getAverage() { + return observations.get().getAverage(); + } + + /** + * Checks if we have datapoints more than or equal to the window size + */ + public boolean isReady() { + return observations.get().isReady(); + } + + /** + * Creates a new instance of MovingAverage with a new window size based on WindowDuration + */ + public void setWindowSize(TimeValue windowDuration) { + this.windowDuration = windowDuration; + int windowSize = (int) (windowDuration.nanos() / pollingInterval.nanos()); + LOGGER.debug("updated window size: {}", windowSize); + observations.set(new MovingAverage(windowSize)); + } + + public TimeValue getPollingInterval() { + return pollingInterval; + } + + public TimeValue getWindowDuration() { + return windowDuration; + } + + public long getWindowSize() { + return observations.get().getCount(); + } + + public void recordUsage(long usage) { + observations.get().record(usage); + } + + @Override + protected void doStart() { + scheduledFuture = threadPool.scheduleWithFixedDelay(() -> { + long usage = getUsage(); + recordUsage(usage); + }, pollingInterval, ThreadPool.Names.GENERIC); + } + + @Override + protected void doStop() { + if (scheduledFuture != null) { + scheduledFuture.cancel(); + } + } + + @Override + protected void doClose() {} +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java new file mode 100644 index 0000000000000..160d385762eb0 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/AverageCpuUsageTracker.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
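AbstractAverageUsageTracker above sizes its MovingAverage window as windowDuration / pollingInterval and reports isReady() only once a full window of samples has been recorded. A minimal ring-buffer moving average with the same contract (a sketch, not OpenSearch's MovingAverage implementation):

class SimpleMovingAverage {
    private final long[] window; // ring buffer of the most recent samples
    private long sum;
    private long count; // total samples recorded so far

    SimpleMovingAverage(int windowSize) {
        this.window = new long[Math.max(1, windowSize)];
    }

    synchronized void record(long value) {
        int idx = (int) (count % window.length);
        sum += value - window[idx]; // evict the oldest sample once the buffer is full
        window[idx] = value;
        count++;
    }

    synchronized boolean isReady() {
        return count >= window.length; // a full window of datapoints exists
    }

    synchronized double getAverage() {
        long n = Math.min(count, window.length);
        return n == 0 ? 0.0 : (double) sum / n;
    }
}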
+ */ + +package org.opensearch.node.resource.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.monitor.process.ProcessProbe; +import org.opensearch.threadpool.ThreadPool; + +/** + * AverageCpuUsageTracker tracks the average CPU usage by polling the CPU usage every (pollingInterval) + * and keeping track of the rolling average over a defined time window (windowDuration). + */ +public class AverageCpuUsageTracker extends AbstractAverageUsageTracker { + private static final Logger LOGGER = LogManager.getLogger(AverageCpuUsageTracker.class); + + public AverageCpuUsageTracker(ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) { + super(threadPool, pollingInterval, windowDuration); + } + + /** + * Returns the process CPU usage in percent + */ + @Override + public long getUsage() { + long usage = ProcessProbe.getInstance().getProcessCpuPercent(); + LOGGER.debug("Recording cpu usage: {}%", usage); + return usage; + } + +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java new file mode 100644 index 0000000000000..c1d1c83656859 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/AverageMemoryUsageTracker.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.resource.tracker; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.threadpool.ThreadPool; + +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; + +/** + * AverageMemoryUsageTracker tracks the average JVM usage by polling the JVM usage every (pollingInterval) + * and keeping track of the rolling average over a defined time window (windowDuration). + */ +public class AverageMemoryUsageTracker extends AbstractAverageUsageTracker { + + private static final Logger LOGGER = LogManager.getLogger(AverageMemoryUsageTracker.class); + + private static final MemoryMXBean MEMORY_MX_BEAN = ManagementFactory.getMemoryMXBean(); + + public AverageMemoryUsageTracker(ThreadPool threadPool, TimeValue pollingInterval, TimeValue windowDuration) { + super(threadPool, pollingInterval, windowDuration); + } + + /** + * Get current memory usage percentage calculated against max heap memory + */ + @Override + public long getUsage() { + long usage = MEMORY_MX_BEAN.getHeapMemoryUsage().getUsed() * 100 / MEMORY_MX_BEAN.getHeapMemoryUsage().getMax(); + LOGGER.debug("Recording memory usage: {}%", usage); + return usage; + } +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java b/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java new file mode 100644 index 0000000000000..cf5f38c1b004c --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/NodeResourceUsageTracker.java @@ -0,0 +1,118 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.threadpool.ThreadPool; + +/** + * This tracks the usage of node resources such as CPU, IO and memory + */ +public class NodeResourceUsageTracker extends AbstractLifecycleComponent { + private ThreadPool threadPool; + private final ClusterSettings clusterSettings; + private AverageCpuUsageTracker cpuUsageTracker; + private AverageMemoryUsageTracker memoryUsageTracker; + + private ResourceTrackerSettings resourceTrackerSettings; + + public NodeResourceUsageTracker(ThreadPool threadPool, Settings settings, ClusterSettings clusterSettings) { + this.threadPool = threadPool; + this.clusterSettings = clusterSettings; + this.resourceTrackerSettings = new ResourceTrackerSettings(settings); + initialize(); + } + + /** + * Return CPU utilization average if we have enough datapoints, otherwise return 0 + */ + public double getCpuUtilizationPercent() { + if (cpuUsageTracker.isReady()) { + return cpuUsageTracker.getAverage(); + } + return 0.0; + } + + /** + * Return memory utilization average if we have enough datapoints, otherwise return 0 + */ + public double getMemoryUtilizationPercent() { + if (memoryUsageTracker.isReady()) { + return memoryUsageTracker.getAverage(); + } + return 0.0; + } + + /** + * Checks if all of the resource usage trackers are ready + */ + public boolean isReady() { + return memoryUsageTracker.isReady() && cpuUsageTracker.isReady(); + } + + void initialize() { + cpuUsageTracker = new AverageCpuUsageTracker( + threadPool, + resourceTrackerSettings.getCpuPollingInterval(), + resourceTrackerSettings.getCpuWindowDuration() + ); + clusterSettings.addSettingsUpdateConsumer( + ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING, + this::setCpuWindowDuration + ); + + memoryUsageTracker = new AverageMemoryUsageTracker( + threadPool, + resourceTrackerSettings.getMemoryPollingInterval(), + resourceTrackerSettings.getMemoryWindowDuration() + ); + clusterSettings.addSettingsUpdateConsumer( + ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING, + this::setMemoryWindowDuration + ); + } + + private void setMemoryWindowDuration(TimeValue windowDuration) { + memoryUsageTracker.setWindowSize(windowDuration); + resourceTrackerSettings.setMemoryWindowDuration(windowDuration); + } + + private void setCpuWindowDuration(TimeValue windowDuration) { + cpuUsageTracker.setWindowSize(windowDuration); + resourceTrackerSettings.setCpuWindowDuration(windowDuration); + } + + /** + * Visible for testing + */ + ResourceTrackerSettings getResourceTrackerSettings() { + return resourceTrackerSettings; + } + + @Override + protected void doStart() { + cpuUsageTracker.doStart(); + memoryUsageTracker.doStart(); + } + + @Override + protected void doStop() { + cpuUsageTracker.doStop(); + memoryUsageTracker.doStop(); + } + + @Override + protected void doClose() { + cpuUsageTracker.doClose(); + memoryUsageTracker.doClose(); + } +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java b/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java new file mode 100644 index 0000000000000..f81b008ba7e8b --- /dev/null +++ b/server/src/main/java/org/opensearch/node/resource/tracker/ResourceTrackerSettings.java @@ -0,0 +1,90 @@ +/* + 
* SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; + +/** + * Settings related to resource usage trackers such as polling interval, window duration etc + */ +public class ResourceTrackerSettings { + + private static class Defaults { + /** + * This is the default polling interval of usage trackers to get the resource utilization data + */ + private static final long POLLING_INTERVAL_IN_MILLIS = 500; + /** + * This is the default window duration on which the average resource utilization values will be calculated + */ + private static final long WINDOW_DURATION_IN_SECONDS = 30; + } + + public static final Setting GLOBAL_CPU_USAGE_AC_POLLING_INTERVAL_SETTING = Setting.positiveTimeSetting( + "node.resource.tracker.global_cpu_usage.polling_interval", + TimeValue.timeValueMillis(Defaults.POLLING_INTERVAL_IN_MILLIS), + Setting.Property.NodeScope + ); + public static final Setting GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING = Setting.positiveTimeSetting( + "node.resource.tracker.global_cpu_usage.window_duration", + TimeValue.timeValueSeconds(Defaults.WINDOW_DURATION_IN_SECONDS), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + public static final Setting GLOBAL_JVM_USAGE_AC_POLLING_INTERVAL_SETTING = Setting.positiveTimeSetting( + "node.resource.tracker.global_jvmmp.polling_interval", + TimeValue.timeValueMillis(Defaults.POLLING_INTERVAL_IN_MILLIS), + Setting.Property.NodeScope + ); + + public static final Setting GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING = Setting.positiveTimeSetting( + "node.resource.tracker.global_jvmmp.window_duration", + TimeValue.timeValueSeconds(Defaults.WINDOW_DURATION_IN_SECONDS), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + private volatile TimeValue cpuWindowDuration; + private volatile TimeValue cpuPollingInterval; + private volatile TimeValue memoryWindowDuration; + private volatile TimeValue memoryPollingInterval; + + public ResourceTrackerSettings(Settings settings) { + this.cpuPollingInterval = GLOBAL_CPU_USAGE_AC_POLLING_INTERVAL_SETTING.get(settings); + this.cpuWindowDuration = GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.get(settings); + this.memoryPollingInterval = GLOBAL_JVM_USAGE_AC_POLLING_INTERVAL_SETTING.get(settings); + this.memoryWindowDuration = GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.get(settings); + } + + public TimeValue getCpuWindowDuration() { + return this.cpuWindowDuration; + } + + public TimeValue getCpuPollingInterval() { + return cpuPollingInterval; + } + + public TimeValue getMemoryPollingInterval() { + return memoryPollingInterval; + } + + public TimeValue getMemoryWindowDuration() { + return memoryWindowDuration; + } + + public void setCpuWindowDuration(TimeValue cpuWindowDuration) { + this.cpuWindowDuration = cpuWindowDuration; + } + + public void setMemoryWindowDuration(TimeValue memoryWindowDuration) { + this.memoryWindowDuration = memoryWindowDuration; + } +} diff --git a/server/src/main/java/org/opensearch/node/resource/tracker/package-info.java b/server/src/main/java/org/opensearch/node/resource/tracker/package-info.java new file mode 100644 index 0000000000000..aace2a019973e --- /dev/null +++ 
b/server/src/main/java/org/opensearch/node/resource/tracker/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Node level resource usage stats tracker package + */ +package org.opensearch.node.resource.tracker; diff --git a/server/src/main/java/org/opensearch/persistent/NodePersistentTasksExecutor.java b/server/src/main/java/org/opensearch/persistent/NodePersistentTasksExecutor.java index 209df1e1f498d..403630b89e42a 100644 --- a/server/src/main/java/org/opensearch/persistent/NodePersistentTasksExecutor.java +++ b/server/src/main/java/org/opensearch/persistent/NodePersistentTasksExecutor.java @@ -37,7 +37,7 @@ /** * This component is responsible for execution of persistent tasks. - * + *
* <p>
              * It abstracts away the execution of tasks and greatly simplifies testing of PersistentTasksNodeService * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java index 89bb23930b063..4e38fb34dbf17 100644 --- a/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/opensearch/persistent/PersistentTasksClusterService.java @@ -323,7 +323,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS /** * This unassigns a task from any node, i.e. it is assigned to a {@code null} node with the provided reason. - * + *
* <p>
              * Since the assignment executor node is null, the {@link PersistentTasksClusterService} will attempt to reassign it to a valid * node quickly. * diff --git a/server/src/main/java/org/opensearch/plugins/CircuitBreakerPlugin.java b/server/src/main/java/org/opensearch/plugins/CircuitBreakerPlugin.java index 7e7345811246f..3552c8286b7a3 100644 --- a/server/src/main/java/org/opensearch/plugins/CircuitBreakerPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/CircuitBreakerPlugin.java @@ -46,9 +46,9 @@ public interface CircuitBreakerPlugin { /** * Each of the factory functions are passed to the configured {@link CircuitBreakerService}. - * + *
* <p>
              * The service then constructs a {@link CircuitBreaker} given the resulting {@link BreakerSettings}. - * + *
* <p>
              * Custom circuit breakers settings can be found in {@link BreakerSettings}. * See: * - limit (example: `breaker.foo.limit`) {@link BreakerSettings#CIRCUIT_BREAKER_LIMIT_SETTING} @@ -63,7 +63,7 @@ public interface CircuitBreakerPlugin { /** * The passed {@link CircuitBreaker} object is the same one that was constructed by the {@link BreakerSettings} * provided by {@link CircuitBreakerPlugin#getCircuitBreaker(Settings)}. - * + *
* <p>
              * This reference should never change throughout the lifetime of the node. * * @param circuitBreaker The constructed {@link CircuitBreaker} object from the {@link BreakerSettings} diff --git a/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java b/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java index c2e147b86d17f..1edd9f52d97a7 100644 --- a/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/ClusterPlugin.java @@ -64,7 +64,7 @@ default Collection createAllocationDeciders(Settings settings /** * Return {@link ShardsAllocator} implementations added by this plugin. - * + *
* <p>
              * The key of the returned {@link Map} is the name of the allocator, and the value * is a function to construct the allocator. * @@ -88,7 +88,7 @@ default Map getExistingShardsAllocators() { /** * Called when the node is started * - * DEPRECATED: Use {@link #onNodeStarted(DiscoveryNode)} for newer implementations. + * @deprecated Use {@link #onNodeStarted(DiscoveryNode)} for newer implementations. */ @Deprecated default void onNodeStarted() {} diff --git a/server/src/main/java/org/opensearch/plugins/CryptoKeyProviderPlugin.java b/server/src/main/java/org/opensearch/plugins/CryptoKeyProviderPlugin.java new file mode 100644 index 0000000000000..47dc75c7c908a --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/CryptoKeyProviderPlugin.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugins; + +import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.crypto.MasterKeyProvider; + +/** + * Crypto plugin to provide support for custom key providers. + * @opensearch.api + */ +@ExperimentalApi +public interface CryptoKeyProviderPlugin { + + /** + * Every call to this method should return a new key provider. + * @param cryptoMetadata These are crypto settings needed for creation of a new key provider. + * @return master key provider. + */ + MasterKeyProvider createKeyProvider(CryptoMetadata cryptoMetadata); + + /** + * One crypto plugin extension implementation refers to a unique key provider type. + * @return key provider type + */ + String type(); +} diff --git a/server/src/main/java/org/opensearch/plugins/CryptoPlugin.java b/server/src/main/java/org/opensearch/plugins/CryptoPlugin.java new file mode 100644 index 0000000000000..ad348d07e23d3 --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/CryptoPlugin.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugins; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.MasterKeyProvider; + +/** + * Crypto plugin to provide encryption and decryption support. + * @opensearch.api + */ +@ExperimentalApi +public interface CryptoPlugin { + + /** + * To create a crypto handler for handling encryption and decryption ops. + * @param keyProvider key provider instance to provide keys used in encrypting data. + * @param keyProviderName Name of key provider to distinguish between multiple instances created with different + * configurations of same keyProviderType. + * @param keyProviderType Unique type of key provider to distinguish between different key provider implementations. + * @param onClose Closes key provider or other clean up operations on close. + * @return crypto handler instance. 
+ */ + CryptoHandler getOrCreateCryptoHandler( + MasterKeyProvider keyProvider, + String keyProviderName, + String keyProviderType, + Runnable onClose + ); +} diff --git a/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java b/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java index bca72942bd70e..63f0d826b592f 100644 --- a/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java @@ -68,13 +68,10 @@ public interface DiscoveryPlugin { * This can be handy if you want to provide your own Network interface name like _mycard_ * and implement by yourself the logic to get an actual IP address/hostname based on this * name. - * + *
* <p>
              * For example: you could call a third party service (an API) to resolve _mycard_. * Then you could define in opensearch.yml settings like: - * - *
<pre>{@code
- * network.host: _mycard_
- * }</pre>
              + * {@code network.host: _mycard_ } */ default NetworkService.CustomNameResolver getCustomNameResolver(Settings settings) { return null; @@ -82,7 +79,7 @@ default NetworkService.CustomNameResolver getCustomNameResolver(Settings setting /** * Returns providers of seed hosts for discovery. - * + *
* <p>
              * The key of the returned map is the name of the host provider * (see {@link org.opensearch.discovery.DiscoveryModule#DISCOVERY_SEED_PROVIDERS_SETTING}), and * the value is a supplier to construct the host provider when it is selected for use. diff --git a/server/src/main/java/org/opensearch/plugins/EnginePlugin.java b/server/src/main/java/org/opensearch/plugins/EnginePlugin.java index 92ae2ff9cd661..5ea5442d84ffa 100644 --- a/server/src/main/java/org/opensearch/plugins/EnginePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/EnginePlugin.java @@ -92,7 +92,7 @@ default Optional getCustomCodecServiceFactory(IndexSettings * When an index is created this method is invoked for each engine plugin. Engine plugins that need to provide a * custom {@link TranslogDeletionPolicy} can override this method to return a function that takes the {@link IndexSettings} * and a {@link Supplier} for {@link RetentionLeases} and returns a custom {@link TranslogDeletionPolicy}. - * + *
* <p>
              * Only one of the installed Engine plugins can override this otherwise {@link IllegalStateException} will be thrown. * * @return a function that returns an instance of {@link TranslogDeletionPolicy} diff --git a/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java b/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java index 4dd4010383934..367d335ac4fea 100644 --- a/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/ExtensiblePlugin.java @@ -36,7 +36,7 @@ /** * An extension point for {@link Plugin} implementations to be themselves extensible. - * + *
* <p>
              * This class provides a callback for extensible plugins to be informed of other plugins * which extend them. * @@ -62,7 +62,7 @@ interface ExtensionLoader { /** * Allow this plugin to load extensions from other plugins. - * + *
* <p>
              * This method is called once only, after initializing this plugin and all plugins extending this plugin. It is called before * any other methods on this Plugin instance are called. */ diff --git a/server/src/main/java/org/opensearch/plugins/IdentityPlugin.java b/server/src/main/java/org/opensearch/plugins/IdentityPlugin.java index 00f3f8aff585c..410535504f0dd 100644 --- a/server/src/main/java/org/opensearch/plugins/IdentityPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/IdentityPlugin.java @@ -19,16 +19,14 @@ public interface IdentityPlugin { /** - * Get the current subject - * - * Should never return null + * Get the current subject. + * @return Should never return null * */ public Subject getSubject(); /** * Get the Identity Plugin's token manager implementation - * - * Should never return null + * @return Should never return null. */ public TokenManager getTokenManager(); } diff --git a/server/src/main/java/org/opensearch/plugins/IngestPlugin.java b/server/src/main/java/org/opensearch/plugins/IngestPlugin.java index dfc11aa8eea55..dc4f22de71344 100644 --- a/server/src/main/java/org/opensearch/plugins/IngestPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/IngestPlugin.java @@ -46,7 +46,7 @@ public interface IngestPlugin { /** * Returns additional ingest processor types added by this plugin. - * + *
* <p>
              * The key of the returned {@link Map} is the unique name for the processor which is specified * in pipeline configurations, and the value is a {@link org.opensearch.ingest.Processor.Factory} * to create the processor from a given pipeline configuration. diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java index de0aecc7833c9..07df40bafe6a1 100644 --- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java @@ -41,6 +41,7 @@ import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.http.HttpServerTransport; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportInterceptor; @@ -82,7 +83,8 @@ default Map> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.emptyMap(); } @@ -100,7 +102,8 @@ default Map> getHttpTransports( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { return Collections.emptyMap(); } diff --git a/server/src/main/java/org/opensearch/plugins/Plugin.java b/server/src/main/java/org/opensearch/plugins/Plugin.java index 998741a098792..48486a6b55dfd 100644 --- a/server/src/main/java/org/opensearch/plugins/Plugin.java +++ b/server/src/main/java/org/opensearch/plugins/Plugin.java @@ -40,6 +40,7 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.inject.Module; import org.opensearch.common.lifecycle.LifecycleComponent; import org.opensearch.common.settings.Setting; @@ -89,6 +90,7 @@ * * @opensearch.api */ +@PublicApi(since = "1.0.0") public abstract class Plugin implements Closeable { /** @@ -119,7 +121,7 @@ public Collection> getGuiceServiceClasses() /** * Returns components added by this plugin. - * + *
* <p>
              * Any components returned that implement {@link LifecycleComponent} will have their lifecycle managed. * Note: To aid in the migration away from guice, all objects returned as components will be bound in guice * to themselves. diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java index 47195a0264750..cc9cc5b5b5fbf 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginsService.java +++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java @@ -468,7 +468,7 @@ private static Bundle readPluginBundle(final Set bundles, final Path plu /** * Return the given bundles, sorted in dependency loading order. - * + *
* <p>
              * This sort is stable, so that if two plugins do not have any interdependency, * their relative order from iteration of the provided set will not change. * diff --git a/server/src/main/java/org/opensearch/plugins/RepositoryPlugin.java b/server/src/main/java/org/opensearch/plugins/RepositoryPlugin.java index 189ba3cfc16ab..09233d49f3aea 100644 --- a/server/src/main/java/org/opensearch/plugins/RepositoryPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/RepositoryPlugin.java @@ -52,9 +52,8 @@ public interface RepositoryPlugin { * Returns repository types added by this plugin. * * @param env The environment for the local node, which may be used for the local settings and path.repo - * - * The key of the returned {@link Map} is the type name of the repository and - * the value is a factory to construct the {@link Repository} interface. + * The key of the returned {@link Map} is the type name of the repository and + * the value is a factory to construct the {@link Repository} interface. */ default Map getRepositories( Environment env, @@ -70,9 +69,8 @@ default Map getRepositories( * through the external API. * * @param env The environment for the local node, which may be used for the local settings and path.repo - * - * The key of the returned {@link Map} is the type name of the repository and - * the value is a factory to construct the {@link Repository} interface. + * The key of the returned {@link Map} is the type name of the repository and + * the value is a factory to construct the {@link Repository} interface. */ default Map getInternalRepositories( Environment env, diff --git a/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java index d2ef2b65c5944..7288a8caaec58 100644 --- a/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java @@ -35,7 +35,7 @@ public interface SearchPipelinePlugin { /** * Returns additional search pipeline request processor types added by this plugin. - * + *
* <p>
              * The key of the returned {@link Map} is the unique name for the processor which is specified * in pipeline configurations, and the value is a {@link org.opensearch.search.pipeline.Processor.Factory} * to create the processor from a given pipeline configuration. @@ -46,7 +46,7 @@ default Map> getRequestProcess /** * Returns additional search pipeline response processor types added by this plugin. - * + *
* <p>
              * The key of the returned {@link Map} is the unique name for the processor which is specified * in pipeline configurations, and the value is a {@link org.opensearch.search.pipeline.Processor.Factory} * to create the processor from a given pipeline configuration. @@ -57,7 +57,7 @@ default Map> getResponseProce /** * Returns additional search pipeline search phase results processor types added by this plugin. - * + *
* <p>
              * The key of the returned {@link Map} is the unique name for the processor which is specified * in pipeline configurations, and the value is a {@link org.opensearch.search.pipeline.Processor.Factory} * to create the processor from a given pipeline configuration. diff --git a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java index a252c46645097..d14c99d9b765b 100644 --- a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java @@ -551,8 +551,8 @@ class CompositeAggregationSpec { private final Consumer aggregatorRegistrar; private final Class valueSourceBuilderClass; @Deprecated - /** This is added for backward compatibility, you don't need to set it, as we use aggregationType instead of - * byte code + /* This is added for backward compatibility, you don't need to set it, as we use aggregationType instead of + byte code */ private Byte byteCode; private final CompositeAggregationParsingFunction parsingFunction; diff --git a/server/src/main/java/org/opensearch/plugins/TelemetryPlugin.java b/server/src/main/java/org/opensearch/plugins/TelemetryPlugin.java index 33dc9b7a0c843..3fc28713b63d5 100644 --- a/server/src/main/java/org/opensearch/plugins/TelemetryPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/TelemetryPlugin.java @@ -8,6 +8,7 @@ package org.opensearch.plugins; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.TelemetrySettings; @@ -15,10 +16,13 @@ /** * Plugin for extending telemetry related classes + * + * @opensearch.experimental */ +@ExperimentalApi public interface TelemetryPlugin { - Optional getTelemetry(TelemetrySettings settings); + Optional getTelemetry(TelemetrySettings telemetrySettings); String getName(); diff --git a/server/src/main/java/org/opensearch/repositories/FilterRepository.java b/server/src/main/java/org/opensearch/repositories/FilterRepository.java index 49a84a82c00ef..0c5cdf24526cb 100644 --- a/server/src/main/java/org/opensearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/opensearch/repositories/FilterRepository.java @@ -143,6 +143,16 @@ public long getRestoreThrottleTimeInNanos() { return in.getRestoreThrottleTimeInNanos(); } + @Override + public long getRemoteUploadThrottleTimeInNanos() { + return in.getRemoteUploadThrottleTimeInNanos(); + } + + @Override + public long getRemoteDownloadThrottleTimeInNanos() { + return in.getRemoteDownloadThrottleTimeInNanos(); + } + @Override public String startVerification() { return in.startVerification(); @@ -163,6 +173,11 @@ public boolean isReadOnly() { return in.isReadOnly(); } + @Override + public boolean isSystemRepository() { + return in.isSystemRepository(); + } + @Override public void snapshotShard( Store store, diff --git a/server/src/main/java/org/opensearch/repositories/IndexId.java b/server/src/main/java/org/opensearch/repositories/IndexId.java index 0b0644ce932e5..d88eeb5aa9c49 100644 --- a/server/src/main/java/org/opensearch/repositories/IndexId.java +++ b/server/src/main/java/org/opensearch/repositories/IndexId.java @@ -80,7 +80,7 @@ public String getName() { * The unique ID for the index within the repository. This is *not* the same as the * index's UUID, but merely a unique file/URL friendly identifier that a repository can * use to name blobs for the index. - * + *
* <p>
              * We could not use the index's actual UUID (See {@link Index#getUUID()}) because in the * case of snapshot/restore, the index UUID in the snapshotted index will be different * from the index UUID assigned to it when it is restored. Hence, the actual index UUID diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java index f18dc63013abf..68669feb16abc 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java @@ -37,6 +37,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; import org.opensearch.action.ActionRunnable; +import org.opensearch.action.admin.cluster.crypto.CryptoSettings; import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.opensearch.cluster.AckedClusterStateUpdateTask; @@ -48,6 +49,7 @@ import org.opensearch.cluster.SnapshotDeletionsInProgress; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; +import org.opensearch.cluster.metadata.CryptoMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; @@ -56,12 +58,12 @@ import org.opensearch.cluster.service.ClusterManagerTaskKeys; import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; @@ -82,12 +84,14 @@ import java.util.stream.Stream; import static org.opensearch.repositories.blobstore.BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; /** * Service responsible for maintaining and providing access to snapshot repositories on nodes. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class RepositoriesService extends AbstractLifecycleComponent implements ClusterStateApplier { private static final Logger logger = LogManager.getLogger(RepositoriesService.class); @@ -151,7 +155,7 @@ public RepositoriesService( } /** - * Registers new repository in the cluster + * Registers new repository or updates an existing repository in the cluster *
<p>
              * This method can be only called on the cluster-manager node. It tries to create a new repository on the master * and if it was successful it adds new repository to cluster metadata. @@ -159,12 +163,20 @@ public RepositoriesService( * @param request register repository request * @param listener register repository listener */ - public void registerRepository(final PutRepositoryRequest request, final ActionListener listener) { + public void registerOrUpdateRepository(final PutRepositoryRequest request, final ActionListener listener) { assert lifecycle.started() : "Trying to register new repository but service is in state [" + lifecycle.state() + "]"; - final RepositoryMetadata newRepositoryMetadata = new RepositoryMetadata(request.name(), request.type(), request.settings()); + final RepositoryMetadata newRepositoryMetadata = new RepositoryMetadata( + request.name(), + request.type(), + request.settings(), + CryptoMetadata.fromRequest(request.cryptoSettings()) + ); validate(request.name()); validateRepositoryMetadataSettings(clusterService, request.name(), request.settings()); + if (newRepositoryMetadata.cryptoMetadata() != null) { + validate(newRepositoryMetadata.cryptoMetadata().keyProviderName()); + } final ActionListener registrationListener; if (request.verify()) { @@ -211,27 +223,58 @@ public ClusterState execute(ClusterState currentState) { if (repositories == null) { logger.info("put repository [{}]", request.name()); repositories = new RepositoriesMetadata( - Collections.singletonList(new RepositoryMetadata(request.name(), request.type(), request.settings())) + Collections.singletonList( + new RepositoryMetadata( + request.name(), + request.type(), + request.settings(), + CryptoMetadata.fromRequest(request.cryptoSettings()) + ) + ) ); } else { boolean found = false; List repositoriesMetadata = new ArrayList<>(repositories.repositories().size() + 1); for (RepositoryMetadata repositoryMetadata : repositories.repositories()) { - if (repositoryMetadata.name().equals(newRepositoryMetadata.name())) { - if (newRepositoryMetadata.equalsIgnoreGenerations(repositoryMetadata)) { + RepositoryMetadata updatedRepositoryMetadata = newRepositoryMetadata; + if (isSystemRepositorySettingPresent(repositoryMetadata.settings())) { + Settings updatedSettings = Settings.builder() + .put(newRepositoryMetadata.settings()) + .put(SYSTEM_REPOSITORY_SETTING.getKey(), true) + .build(); + updatedRepositoryMetadata = new RepositoryMetadata( + newRepositoryMetadata.name(), + newRepositoryMetadata.type(), + updatedSettings, + newRepositoryMetadata.cryptoMetadata() + ); + } + if (repositoryMetadata.name().equals(updatedRepositoryMetadata.name())) { + if (updatedRepositoryMetadata.equalsIgnoreGenerations(repositoryMetadata)) { // Previous version is the same as this one no update is needed. 
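// (equalsIgnoreGenerations compares name, type, settings and crypto metadata but
// deliberately ignores the snapshot generation fields, so an update that only
// advances generations is treated as identical and short-circuits here.)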
return currentState; } + ensureCryptoSettingsAreSame(repositoryMetadata, request); found = true; - repositoriesMetadata.add(newRepositoryMetadata); + if (isSystemRepositorySettingPresent(repositoryMetadata.settings())) { + ensureValidSystemRepositoryUpdate(updatedRepositoryMetadata, repositoryMetadata); + } + repositoriesMetadata.add(updatedRepositoryMetadata); } else { repositoriesMetadata.add(repositoryMetadata); } } if (!found) { logger.info("put repository [{}]", request.name()); - repositoriesMetadata.add(new RepositoryMetadata(request.name(), request.type(), request.settings())); + repositoriesMetadata.add( + new RepositoryMetadata( + request.name(), + request.type(), + request.settings(), + CryptoMetadata.fromRequest(request.cryptoSettings()) + ) + ); } else { logger.info("update repository [{}]", request.name()); } @@ -289,6 +332,7 @@ public ClusterState execute(ClusterState currentState) { for (RepositoryMetadata repositoryMetadata : repositories.repositories()) { if (Regex.simpleMatch(request.name(), repositoryMetadata.name())) { ensureRepositoryNotInUse(currentState, repositoryMetadata.name()); + ensureNotSystemRepository(repositoryMetadata); logger.info("delete repository [{}]", repositoryMetadata.name()); changed = true; } else { @@ -393,7 +437,13 @@ public void applyClusterState(ClusterChangedEvent event) { // Check if repositories got changed if ((oldMetadata == null && newMetadata == null) || (oldMetadata != null && oldMetadata.equalsIgnoreGenerations(newMetadata))) { for (Repository repo : repositories.values()) { - repo.updateState(state); + // Update State should only be invoked for repository which are already in cluster state. This + // check needs to be added as system repositories can be populated before cluster state has the + // repository metadata. 
+ RepositoriesMetadata stateRepositoriesMetadata = state.metadata().custom(RepositoriesMetadata.TYPE); + if (stateRepositoriesMetadata != null && stateRepositoriesMetadata.repository(repo.getMetadata().name()) != null) { + repo.updateState(state); + } } return; } @@ -407,7 +457,6 @@ public void applyClusterState(ClusterChangedEvent event) { logger.debug("unregistering repository [{}]", entry.getKey()); Repository repository = entry.getValue(); closeRepository(repository); - archiveRepositoryStats(repository, state.version()); } else { survivors.put(entry.getKey(), entry.getValue()); } @@ -424,24 +473,48 @@ public void applyClusterState(ClusterChangedEvent event) { if (previousMetadata.type().equals(repositoryMetadata.type()) == false || previousMetadata.settings().equals(repositoryMetadata.settings()) == false) { // Previous version is different from the version in settings - logger.debug("updating repository [{}]", repositoryMetadata.name()); - closeRepository(repository); - archiveRepositoryStats(repository, state.version()); - repository = null; - try { - repository = createRepository(repositoryMetadata, typesRegistry); - } catch (RepositoryException ex) { - // TODO: this catch is bogus, it means the old repo is already closed, - // but we have nothing to replace it - logger.warn( - () -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetadata.name()), - ex + if (repository.isSystemRepository() && repository.isReloadable()) { + logger.debug( + "updating repository [{}] in-place to use new metadata [{}]", + repositoryMetadata.name(), + repositoryMetadata ); + repository.validateMetadata(repositoryMetadata); + repository.reload(repositoryMetadata); + } else { + logger.debug("updating repository [{}]", repositoryMetadata.name()); + closeRepository(repository); + repository = null; + try { + repository = createRepository(repositoryMetadata, typesRegistry); + } catch (RepositoryException ex) { + // TODO: this catch is bogus, it means the old repo is already closed, + // but we have nothing to replace it + logger.warn( + () -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetadata.name()), + ex + ); + } } } } else { try { - repository = createRepository(repositoryMetadata, typesRegistry); + // System repositories are already created and verified and hence during cluster state + // update we should avoid creating it again. Once the cluster state is update with the + // repository metadata the repository metadata update will land in the above if block. + if (repositories.containsKey(repositoryMetadata.name()) == false) { + repository = createRepository(repositoryMetadata, typesRegistry); + } else { + // Validate the repository metadata which was created during bootstrap is same as the + // one present in incoming cluster state. 
+ repository = repositories.get(repositoryMetadata.name()); + if (repositoryMetadata.equalsIgnoreGenerations(repository.getMetadata()) == false) { + throw new RepositoryException( + repositoryMetadata.name(), + "repository was already " + "registered with different metadata during bootstrap than cluster state" + ); + } + } } catch (RepositoryException ex) { logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetadata.name()), ex); } @@ -500,12 +573,12 @@ public Repository repository(String repositoryName) { } public List repositoriesStats() { - List archivedRepoStats = repositoriesStatsArchive.getArchivedStats(); List activeRepoStats = getRepositoryStatsForActiveRepositories(); + return activeRepoStats; + } - List repositoriesStats = new ArrayList<>(archivedRepoStats); - repositoriesStats.addAll(activeRepoStats); - return repositoriesStats; + public RepositoriesStats getRepositoriesStats() { + return new RepositoriesStats(repositoriesStats()); } private List getRepositoryStatsForActiveRepositories() { @@ -560,18 +633,16 @@ public void unregisterInternalRepository(String name) { } /** Closes the given repository. */ - private void closeRepository(Repository repository) { + public void closeRepository(Repository repository) { logger.debug("closing repository [{}][{}]", repository.getMetadata().type(), repository.getMetadata().name()); repository.close(); } - private void archiveRepositoryStats(Repository repository, long clusterStateVersion) { - if (repository instanceof MeteredBlobStoreRepository) { - RepositoryStatsSnapshot stats = ((MeteredBlobStoreRepository) repository).statsSnapshotForArchival(clusterStateVersion); - if (repositoriesStatsArchive.archive(stats) == false) { - logger.warn("Unable to archive the repository stats [{}] as the archive is full.", stats); - } - } + /** + * Creates repository holder. 
This method starts the non-internal repository + */ + public Repository createRepository(RepositoryMetadata repositoryMetadata) { + return this.createRepository(repositoryMetadata, typesRegistry); } /** @@ -598,15 +669,15 @@ private Repository createRepository(RepositoryMetadata repositoryMetadata, Map repos) { + if (repositories.isEmpty()) { + repositories = repos; + } else { + throw new IllegalArgumentException("can't overwrite as repositories are already present"); + } + } + + private static void ensureNotSystemRepository(RepositoryMetadata repositoryMetadata) { + if (isSystemRepositorySettingPresent(repositoryMetadata.settings())) { + throw new RepositoryException(repositoryMetadata.name(), "cannot delete a system repository"); + } + } + + private static boolean isSystemRepositorySettingPresent(Settings repositoryMetadataSettings) { + return SYSTEM_REPOSITORY_SETTING.get(repositoryMetadataSettings); + } + + private static boolean isValueEqual(String key, String newValue, String currentValue) { + if (newValue == null && currentValue == null) { + return true; + } + if (newValue == null) { + throw new IllegalArgumentException("[" + key + "] cannot be empty, " + "current value [" + currentValue + "]"); + } + if (newValue.equals(currentValue) == false) { + throw new IllegalArgumentException( + "trying to modify an unmodifiable attribute " + + key + + " of system repository from " + + "current value [" + + currentValue + + "] to new value [" + + newValue + + "]" + ); + } + return true; + } + + public void ensureValidSystemRepositoryUpdate(RepositoryMetadata newRepositoryMetadata, RepositoryMetadata currentRepositoryMetadata) { + if (isSystemRepositorySettingPresent(currentRepositoryMetadata.settings())) { + try { + isValueEqual("type", newRepositoryMetadata.type(), currentRepositoryMetadata.type()); + + Repository repository = repositories.get(currentRepositoryMetadata.name()); + Settings newRepositoryMetadataSettings = newRepositoryMetadata.settings(); + Settings currentRepositoryMetadataSettings = currentRepositoryMetadata.settings(); + + List restrictedSettings = repository.getRestrictedSystemRepositorySettings() + .stream() + .map(setting -> setting.getKey()) + .collect(Collectors.toList()); + + for (String restrictedSettingKey : restrictedSettings) { + isValueEqual( + restrictedSettingKey, + newRepositoryMetadataSettings.get(restrictedSettingKey), + currentRepositoryMetadataSettings.get(restrictedSettingKey) + ); + } + } catch (IllegalArgumentException e) { + throw new RepositoryException(currentRepositoryMetadata.name(), e.getMessage()); + } + } + } + @Override protected void doStart() { diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesStats.java b/server/src/main/java/org/opensearch/repositories/RepositoriesStats.java new file mode 100644 index 0000000000000..b24e0dddd852a --- /dev/null +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesStats.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.repositories; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.common.util.CollectionUtils; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; + +/** + * Encapsulates stats for multiple repositories* + */ +public class RepositoriesStats implements Writeable, ToXContentObject { + + List repositoryStatsSnapshots; + + public RepositoriesStats(List repositoryStatsSnapshots) { + this.repositoryStatsSnapshots = repositoryStatsSnapshots; + } + + public RepositoriesStats(StreamInput in) throws IOException { + this.repositoryStatsSnapshots = in.readList(RepositoryStatsSnapshot::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(repositoryStatsSnapshots); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray("repositories"); + if (CollectionUtils.isEmpty(repositoryStatsSnapshots) == false) { + for (RepositoryStatsSnapshot repositoryStatsSnapshot : repositoryStatsSnapshots) { + repositoryStatsSnapshot.toXContent(builder, params); + } + } + builder.endArray(); + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesStatsArchive.java b/server/src/main/java/org/opensearch/repositories/RepositoriesStatsArchive.java index b8f100706f81e..3d35f75176eaf 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesStatsArchive.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesStatsArchive.java @@ -70,11 +70,6 @@ public RepositoriesStatsArchive(TimeValue retentionPeriod, int maxCapacity, Long * @return {@code true} if the repository stats were archived, {@code false} otherwise. 
*/ synchronized boolean archive(final RepositoryStatsSnapshot repositoryStats) { - assert containsRepositoryStats(repositoryStats) == false : "A repository with ephemeral id " - + repositoryStats.getRepositoryInfo().ephemeralId - + " is already archived"; - assert repositoryStats.isArchived(); - evict(); if (archive.size() >= maxCapacity) { @@ -116,15 +111,6 @@ private void evict() { } } - private boolean containsRepositoryStats(RepositoryStatsSnapshot repositoryStats) { - return archive.stream() - .anyMatch( - entry -> entry.repositoryStatsSnapshot.getRepositoryInfo().ephemeralId.equals( - repositoryStats.getRepositoryInfo().ephemeralId - ) - ); - } - private static class ArchiveEntry { private final RepositoryStatsSnapshot repositoryStatsSnapshot; private final long createdAtMillis; diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index 8d55b095e8e39..6781b72d4b8d7 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Nullable; import org.opensearch.common.lifecycle.LifecycleComponent; +import org.opensearch.common.settings.Setting; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.mapper.MapperService; @@ -55,6 +56,7 @@ import java.io.IOException; import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.function.Consumer; @@ -212,6 +214,16 @@ default void deleteSnapshotsAndReleaseLockFiles( */ long getRestoreThrottleTimeInNanos(); + /** + * Returns restore throttle time in nanoseconds + */ + long getRemoteUploadThrottleTimeInNanos(); + + /** + * Returns restore throttle time in nanoseconds + */ + long getRemoteDownloadThrottleTimeInNanos(); + /** * Returns stats on the repository usage */ @@ -251,6 +263,13 @@ default RepositoryStats stats() { */ boolean isReadOnly(); + /** + * Returns true if the repository is managed by the system directly and doesn't allow managing the lifetime of the + * repository through external APIs + * @return true if the repository is system managed + */ + boolean isSystemRepository(); + /** * Creates a snapshot of the shard based on the index commit point. *
<p>
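The hunks above add remote upload/download throttle accessors and an isSystemRepository() flag to the Repository contract. As a rough sketch of how an implementation might back that flag, assuming it reads the same system_repository setting that RepositoriesService consults elsewhere in this diff (the class and field names below are illustrative, not part of this change):

import org.opensearch.cluster.metadata.RepositoryMetadata;
import org.opensearch.common.settings.Setting;

// Illustrative sketch only: backing the new accessor with a repository setting.
abstract class SystemAwareRepositorySketch {
    // Assumed to mirror BlobStoreRepository.SYSTEM_REPOSITORY_SETTING referenced in this diff.
    static final Setting<Boolean> SYSTEM_REPOSITORY_SETTING =
        Setting.boolSetting("system_repository", false, Setting.Property.NodeScope);

    private final RepositoryMetadata metadata;

    SystemAwareRepositorySketch(RepositoryMetadata metadata) {
        this.metadata = metadata;
    }

    public boolean isSystemRepository() {
        // A repository registered with system_repository: true is system managed;
        // RepositoriesService then refuses to delete it and restricts which settings may change.
        return SYSTEM_REPOSITORY_SETTING.get(metadata.settings());
    }
}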
              @@ -339,6 +358,14 @@ void restoreShard( ActionListener listener ); + /** + * Returns the list of restricted system repository settings that cannot be mutated post repository creation. + * @return list of settings + */ + default List> getRestrictedSystemRepositorySettings() { + return Collections.emptyList(); + } + /** * Returns Snapshot Shard Metadata for remote store interop enabled snapshot. *
<p>
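getRestrictedSystemRepositorySettings() defaults to an empty list in the hunk above. A concrete repository that wants ensureValidSystemRepositoryUpdate, added earlier in this diff, to reject changes to particular settings would override it roughly as follows; the setting keys here are hypothetical examples, not taken from this change:

import java.util.List;

import org.opensearch.common.settings.Setting;

// Illustrative sketch only: declaring settings that must stay fixed on a system repository.
abstract class PinnedSettingsRepositorySketch {
    // Hypothetical blob-store settings used purely for illustration.
    static final Setting<String> BUCKET_SETTING =
        Setting.simpleString("bucket", Setting.Property.NodeScope);
    static final Setting<String> BASE_PATH_SETTING =
        Setting.simpleString("base_path", Setting.Property.NodeScope);

    public List<Setting<?>> getRestrictedSystemRepositorySettings() {
        // ensureValidSystemRepositoryUpdate compares each of these keys between the current
        // and the proposed metadata and fails the update (or node join) when a value differs.
        return List.of(BUCKET_SETTING, BASE_PATH_SETTING);
    }
}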
              @@ -437,4 +464,22 @@ default void cloneRemoteStoreIndexShardSnapshot( default Map adaptUserMetadata(Map userMetadata) { return userMetadata; } + + /** + * Checks if the repository can be reloaded inplace or not + * @return true if the repository can be reloaded inplace, false otherwise + */ + default boolean isReloadable() { + return false; + } + + /** + * Reload the repository inplace + */ + default void reload(RepositoryMetadata repositoryMetadata) {} + + /** + * Validate the repository metadata + */ + default void validateMetadata(RepositoryMetadata repositoryMetadata) {} } diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryInfo.java b/server/src/main/java/org/opensearch/repositories/RepositoryInfo.java index 8aa86fc46d591..387a685bd6526 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryInfo.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryInfo.java @@ -32,7 +32,6 @@ package org.opensearch.repositories; -import org.opensearch.common.Nullable; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -51,64 +50,27 @@ * @opensearch.internal */ public final class RepositoryInfo implements Writeable, ToXContentFragment { - public final String ephemeralId; public final String name; public final String type; public final Map location; - public final long startedAt; - @Nullable - public final Long stoppedAt; - public RepositoryInfo(String ephemeralId, String name, String type, Map location, long startedAt) { - this(ephemeralId, name, type, location, startedAt, null); - } - - public RepositoryInfo( - String ephemeralId, - String name, - String type, - Map location, - long startedAt, - @Nullable Long stoppedAt - ) { - this.ephemeralId = ephemeralId; + public RepositoryInfo(String name, String type, Map location) { this.name = name; this.type = type; this.location = location; - this.startedAt = startedAt; - if (stoppedAt != null && startedAt > stoppedAt) { - throw new IllegalArgumentException("createdAt must be before or equal to stoppedAt"); - } - this.stoppedAt = stoppedAt; } public RepositoryInfo(StreamInput in) throws IOException { - this.ephemeralId = in.readString(); this.name = in.readString(); this.type = in.readString(); this.location = in.readMap(StreamInput::readString, StreamInput::readString); - this.startedAt = in.readLong(); - this.stoppedAt = in.readOptionalLong(); - } - - public RepositoryInfo stopped(long stoppedAt) { - assert isStopped() == false : "The repository is already stopped"; - - return new RepositoryInfo(ephemeralId, name, type, location, startedAt, stoppedAt); - } - - public boolean isStopped() { - return stoppedAt != null; } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(ephemeralId); out.writeString(name); out.writeString(type); out.writeMap(location, StreamOutput::writeString, StreamOutput::writeString); - out.writeLong(startedAt); - out.writeOptionalLong(stoppedAt); } @Override @@ -116,11 +78,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("repository_name", name); builder.field("repository_type", type); builder.field("repository_location", location); - builder.field("repository_ephemeral_id", ephemeralId); - builder.field("repository_started_at", startedAt); - if (stoppedAt != null) { - builder.field("repository_stopped_at", stoppedAt); - } return builder; } @@ -129,17 +86,12 @@ public boolean 
equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; RepositoryInfo that = (RepositoryInfo) o; - return ephemeralId.equals(that.ephemeralId) - && name.equals(that.name) - && type.equals(that.type) - && location.equals(that.location) - && startedAt == that.startedAt - && Objects.equals(stoppedAt, that.stoppedAt); + return name.equals(that.name) && type.equals(that.type) && location.equals(that.location); } @Override public int hashCode() { - return Objects.hash(ephemeralId, name, type, location, startedAt, stoppedAt); + return Objects.hash(name, type, location); } @Override diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryStats.java b/server/src/main/java/org/opensearch/repositories/RepositoryStats.java index efd5d6f8560b6..ab97c5eaa1f7a 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryStats.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryStats.java @@ -32,9 +32,13 @@ package org.opensearch.repositories; +import org.opensearch.common.Nullable; +import org.opensearch.common.blobstore.BlobStore; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; import java.util.Collections; @@ -47,32 +51,63 @@ * * @opensearch.internal */ -public class RepositoryStats implements Writeable { +public class RepositoryStats implements Writeable, ToXContentFragment { public static final RepositoryStats EMPTY_STATS = new RepositoryStats(Collections.emptyMap()); + @Nullable public final Map requestCounts; + @Nullable + public final Map> extendedStats; + public final boolean detailed; public RepositoryStats(Map requestCounts) { this.requestCounts = Collections.unmodifiableMap(requestCounts); + this.extendedStats = Collections.emptyMap(); + this.detailed = false; + } + + public RepositoryStats(Map> extendedStats, boolean detailed) { + this.requestCounts = Collections.emptyMap(); + this.extendedStats = Collections.unmodifiableMap(extendedStats); + this.detailed = detailed; } public RepositoryStats(StreamInput in) throws IOException { this.requestCounts = in.readMap(StreamInput::readString, StreamInput::readLong); + this.extendedStats = in.readMap( + e -> e.readEnum(BlobStore.Metric.class), + i -> i.readMap(StreamInput::readString, StreamInput::readLong) + ); + this.detailed = in.readBoolean(); } public RepositoryStats merge(RepositoryStats otherStats) { - final Map result = new HashMap<>(); - result.putAll(requestCounts); - for (Map.Entry entry : otherStats.requestCounts.entrySet()) { - result.merge(entry.getKey(), entry.getValue(), Math::addExact); + assert this.detailed == otherStats.detailed; + if (detailed) { + final Map> result = new HashMap<>(); + result.putAll(extendedStats); + for (Map.Entry> entry : otherStats.extendedStats.entrySet()) { + for (Map.Entry nested : entry.getValue().entrySet()) { + result.get(entry.getKey()).merge(nested.getKey(), nested.getValue(), Math::addExact); + } + } + return new RepositoryStats(result, true); + } else { + final Map result = new HashMap<>(); + result.putAll(requestCounts); + for (Map.Entry entry : otherStats.requestCounts.entrySet()) { + result.merge(entry.getKey(), entry.getValue(), Math::addExact); + } + return new RepositoryStats(result); } - return new RepositoryStats(result); } @Override public 
void writeTo(StreamOutput out) throws IOException { out.writeMap(requestCounts, StreamOutput::writeString, StreamOutput::writeLong); + out.writeMap(extendedStats, StreamOutput::writeEnum, (o, v) -> o.writeMap(v, StreamOutput::writeString, StreamOutput::writeLong)); + out.writeBoolean(detailed); } @Override @@ -80,16 +115,32 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; RepositoryStats that = (RepositoryStats) o; - return requestCounts.equals(that.requestCounts); + return requestCounts.equals(that.requestCounts) && extendedStats.equals(that.extendedStats) && detailed == that.detailed; } @Override public int hashCode() { - return Objects.hash(requestCounts); + return Objects.hash(requestCounts, detailed, extendedStats); } @Override public String toString() { - return "RepositoryStats{" + "requestCounts=" + requestCounts + '}'; + return "RepositoryStats{" + "requestCounts=" + requestCounts + "extendedStats=" + extendedStats + "detailed =" + detailed + "}"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (detailed == false) { + builder.field("request_counts", requestCounts); + } else { + extendedStats.forEach((k, v) -> { + try { + builder.field(k.metricName(), v); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + return builder; } } diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryStatsSnapshot.java b/server/src/main/java/org/opensearch/repositories/RepositoryStatsSnapshot.java index 2b061cd2c2cc9..0a727980fad0d 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryStatsSnapshot.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryStatsSnapshot.java @@ -53,21 +53,17 @@ public final class RepositoryStatsSnapshot implements Writeable, ToXContentObjec private final RepositoryInfo repositoryInfo; private final RepositoryStats repositoryStats; private final long clusterVersion; - private final boolean archived; - public RepositoryStatsSnapshot(RepositoryInfo repositoryInfo, RepositoryStats repositoryStats, long clusterVersion, boolean archived) { - assert archived != (clusterVersion == UNKNOWN_CLUSTER_VERSION); + public RepositoryStatsSnapshot(RepositoryInfo repositoryInfo, RepositoryStats repositoryStats, long clusterVersion) { this.repositoryInfo = repositoryInfo; this.repositoryStats = repositoryStats; this.clusterVersion = clusterVersion; - this.archived = archived; } public RepositoryStatsSnapshot(StreamInput in) throws IOException { this.repositoryInfo = new RepositoryInfo(in); this.repositoryStats = new RepositoryStats(in); this.clusterVersion = in.readLong(); - this.archived = in.readBoolean(); } public RepositoryInfo getRepositoryInfo() { @@ -78,10 +74,6 @@ public RepositoryStats getRepositoryStats() { return repositoryStats; } - public boolean isArchived() { - return archived; - } - public long getClusterVersion() { return clusterVersion; } @@ -91,18 +83,13 @@ public void writeTo(StreamOutput out) throws IOException { repositoryInfo.writeTo(out); repositoryStats.writeTo(out); out.writeLong(clusterVersion); - out.writeBoolean(archived); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); repositoryInfo.toXContent(builder, params); - builder.field("request_counts", repositoryStats.requestCounts); - builder.field("archived", archived); - if (archived) { - 
builder.field("cluster_version", clusterVersion); - } + repositoryStats.toXContent(builder, params); builder.endObject(); return builder; } @@ -114,13 +101,12 @@ public boolean equals(Object o) { RepositoryStatsSnapshot that = (RepositoryStatsSnapshot) o; return repositoryInfo.equals(that.repositoryInfo) && repositoryStats.equals(that.repositoryStats) - && clusterVersion == that.clusterVersion - && archived == that.archived; + && clusterVersion == that.clusterVersion; } @Override public int hashCode() { - return Objects.hash(repositoryInfo, repositoryStats, clusterVersion, archived); + return Objects.hash(repositoryInfo, repositoryStats, clusterVersion); } @Override diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index da02fa81925db..abdcb66e29dbe 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -72,7 +72,10 @@ import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.blobstore.DeleteResult; +import org.opensearch.common.blobstore.EncryptedBlobStore; import org.opensearch.common.blobstore.fs.FsBlobContainer; +import org.opensearch.common.blobstore.transfer.stream.OffsetRangeInputStream; +import org.opensearch.common.blobstore.transfer.stream.RateLimitingOffsetRangeInputStream; import org.opensearch.common.collect.Tuple; import org.opensearch.common.compress.DeflateCompressor; import org.opensearch.common.io.Streams; @@ -97,6 +100,7 @@ import org.opensearch.core.compress.Compressor; import org.opensearch.core.compress.CompressorRegistry; import org.opensearch.core.compress.NotXContentException; +import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.index.snapshots.IndexShardSnapshotFailedException; import org.opensearch.core.util.BytesRefUtils; @@ -113,11 +117,12 @@ import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.index.snapshots.blobstore.SlicedInputStream; import org.opensearch.index.snapshots.blobstore.SnapshotFiles; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.store.lockmanager.FileLockInfo; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; -import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.repositories.IndexId; @@ -145,6 +150,7 @@ import java.io.InputStream; import java.nio.file.NoSuchFileException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -168,6 +174,7 @@ import java.util.stream.Stream; import static org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName; +import static org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS; /** * BlobStore - based implementation of Snapshot Repository @@ -283,22 +290,39 @@ public abstract class BlobStoreRepository extends 
AbstractLifecycleComponent imp */ public static final Setting READONLY_SETTING = Setting.boolSetting("readonly", false, Setting.Property.NodeScope); - protected final boolean supportURLRepo; + /*** + * Setting to set repository as system repository + */ + public static final Setting SYSTEM_REPOSITORY_SETTING = Setting.boolSetting( + "system_repository", + false, + Setting.Property.NodeScope + ); + + protected volatile boolean supportURLRepo; - private final int maxShardBlobDeleteBatch; + private volatile int maxShardBlobDeleteBatch; - private final Compressor compressor; + private volatile Compressor compressor; - private final boolean cacheRepositoryData; + private volatile boolean cacheRepositoryData; - private final RateLimiter snapshotRateLimiter; + private volatile RateLimiter snapshotRateLimiter; - private final RateLimiter restoreRateLimiter; + private volatile RateLimiter restoreRateLimiter; + + private volatile RateLimiter remoteUploadRateLimiter; + + private volatile RateLimiter remoteDownloadRateLimiter; private final CounterMetric snapshotRateLimitingTimeInNanos = new CounterMetric(); private final CounterMetric restoreRateLimitingTimeInNanos = new CounterMetric(); + private final CounterMetric remoteDownloadRateLimitingTimeInNanos = new CounterMetric(); + + private final CounterMetric remoteUploadRateLimitingTimeInNanos = new CounterMetric(); + public static final ChecksumBlobStoreFormat GLOBAL_METADATA_FORMAT = new ChecksumBlobStoreFormat<>( "metadata", METADATA_NAME_FORMAT, @@ -334,7 +358,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp BlobStoreIndexShardSnapshots::fromXContent ); - private final boolean readOnly; + private volatile boolean readOnly; + + private final boolean isSystemRepository; private final Object lock = new Object(); @@ -342,7 +368,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final SetOnce blobStore = new SetOnce<>(); - private final ClusterService clusterService; + protected final ClusterService clusterService; private final RecoverySettings recoverySettings; @@ -379,33 +405,54 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp /** * IO buffer size hint for reading and writing to the underlying blob store. 
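Why these fields change from final to volatile: reload(RepositoryMetadata) rewrites them on one thread while snapshot and restore threads keep reading them concurrently, and volatile guarantees each new value is published safely without locking. A minimal sketch of the pattern, not the OpenSearch code; the buffer_size setting here is a hypothetical stand-in for the real settings:

```java
import java.util.Map;

class ReloadableSettingsHolder {
    // volatile: readers always observe the most recently published value
    private volatile int bufferSize = 8 * 1024;

    // mirrors the readRepositoryMetadata(...) reload hook in the diff
    void reload(Map<String, String> settings) {
        String value = settings.get("buffer_size");
        if (value != null) {
            bufferSize = Integer.parseInt(value); // single atomic publish
        }
    }

    int bufferSize() {
        return bufferSize; // may be called mid-reload from another thread
    }
}
```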
*/ - protected final int bufferSize; + protected volatile int bufferSize; /** * Constructs new BlobStoreRepository - * @param metadata The metadata for this repository including name and settings + * @param repositoryMetadata The metadata for this repository including name and settings * @param clusterService ClusterService */ protected BlobStoreRepository( - final RepositoryMetadata metadata, - final boolean compress, + final RepositoryMetadata repositoryMetadata, final NamedXContentRegistry namedXContentRegistry, final ClusterService clusterService, final RecoverySettings recoverySettings ) { - this.metadata = metadata; + // Read RepositoryMetadata as the first step + readRepositoryMetadata(repositoryMetadata); + + isSystemRepository = SYSTEM_REPOSITORY_SETTING.get(metadata.settings()); this.namedXContentRegistry = namedXContentRegistry; this.threadPool = clusterService.getClusterApplierService().threadPool(); this.clusterService = clusterService; this.recoverySettings = recoverySettings; - this.supportURLRepo = SUPPORT_URL_REPO.get(metadata.settings()); + } + + @Override + public void reload(RepositoryMetadata repositoryMetadata) { + readRepositoryMetadata(repositoryMetadata); + } + + /** + * Reloads the values derived from the Repository Metadata + * + * @param repositoryMetadata RepositoryMetadata instance to derive the values from + */ + private void readRepositoryMetadata(RepositoryMetadata repositoryMetadata) { + this.metadata = repositoryMetadata; + + supportURLRepo = SUPPORT_URL_REPO.get(metadata.settings()); snapshotRateLimiter = getRateLimiter(metadata.settings(), "max_snapshot_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)); restoreRateLimiter = getRateLimiter(metadata.settings(), "max_restore_bytes_per_sec", ByteSizeValue.ZERO); + remoteUploadRateLimiter = getRateLimiter(metadata.settings(), "max_remote_upload_bytes_per_sec", ByteSizeValue.ZERO); + remoteDownloadRateLimiter = getRateLimiter(metadata.settings(), "max_remote_download_bytes_per_sec", ByteSizeValue.ZERO); readOnly = READONLY_SETTING.get(metadata.settings()); cacheRepositoryData = CACHE_REPOSITORY_DATA.get(metadata.settings()); bufferSize = Math.toIntExact(BUFFER_SIZE_SETTING.get(metadata.settings()).getBytes()); maxShardBlobDeleteBatch = MAX_SNAPSHOT_SHARD_BLOB_DELETE_BATCH_SIZE.get(metadata.settings()); - this.compressor = compress ? COMPRESSION_TYPE_SETTING.get(metadata.settings()) : CompressorRegistry.none(); + compressor = COMPRESS_SETTING.get(metadata.settings()) + ? 
COMPRESSION_TYPE_SETTING.get(metadata.settings()) + : CompressorRegistry.none(); } @Override @@ -620,7 +667,7 @@ public void cloneRemoteStoreIndexShardSnapshot( RemoteStoreShardShallowCopySnapshot remStoreBasedShardMetadata = (RemoteStoreShardShallowCopySnapshot) indexShardSnapshot; String indexUUID = remStoreBasedShardMetadata.getIndexUUID(); String remoteStoreRepository = remStoreBasedShardMetadata.getRemoteStoreRepository(); - RemoteStoreMetadataLockManager remoteStoreMetadataLockManger = remoteStoreLockManagerFactory.newLockManager( + RemoteStoreLockManager remoteStoreMetadataLockManger = remoteStoreLockManagerFactory.newLockManager( remoteStoreRepository, indexUUID, String.valueOf(shardId.shardId()) @@ -746,6 +793,9 @@ public BlobStore blobStore() { } try { store = createBlobStore(); + if (metadata.cryptoMetadata() != null) { + store = new EncryptedBlobStore(store, metadata.cryptoMetadata()); + } } catch (RepositoryException e) { throw e; } catch (Exception e) { @@ -793,11 +843,21 @@ public RepositoryMetadata getMetadata() { return metadata; } + public NamedXContentRegistry getNamedXContentRegistry() { + return namedXContentRegistry; + } + + public Compressor getCompressor() { + return compressor; + } + @Override public RepositoryStats stats() { final BlobStore store = blobStore.get(); if (store == null) { return RepositoryStats.EMPTY_STATS; + } else if (store.extendedStats() != null && store.extendedStats().isEmpty() == false) { + return new RepositoryStats(store.extendedStats(), true); } return new RepositoryStats(store.stats()); } @@ -1135,11 +1195,28 @@ private void executeStaleShardDelete( // Releasing lock file before deleting the shallow-snap-UUID file because in case of any failure while // releasing the lock file, we would still have the shallow-snap-UUID file and that would be used during // next delete operation for releasing this lock file - RemoteStoreMetadataLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory - .newLockManager(remoteStoreRepoForIndex, indexUUID, shardId); + RemoteStoreLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory.newLockManager( + remoteStoreRepoForIndex, + indexUUID, + shardId + ); remoteStoreMetadataLockManager.release( FileLockInfo.getLockInfoBuilder().withAcquirerId(snapshotUUID).build() ); + if (!isIndexPresent(clusterService, indexUUID)) { + // this is a temporary solution where snapshot deletion triggers remote store side + // cleanup if index is already deleted. We will add a poller in future to take + // care of remote store side cleanup. 
+ // see https://github.com/opensearch-project/OpenSearch/issues/8469 + new RemoteSegmentStoreDirectoryFactory( + remoteStoreLockManagerFactory.getRepositoriesService(), + threadPool + ).newDirectory( + remoteStoreRepoForIndex, + indexUUID, + new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.valueOf(shardId)) + ).close(); + } } } } @@ -1550,6 +1627,15 @@ private void cleanupStaleIndices( } } + private static boolean isIndexPresent(ClusterService clusterService, String indexUUID) { + for (final IndexMetadata indexMetadata : clusterService.state().metadata().getIndices().values()) { + if (indexUUID.equals(indexMetadata.getIndexUUID())) { + return true; + } + } + return false; + } + private void executeOneStaleIndexDelete( BlockingQueue> staleIndicesToDelete, RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, @@ -1582,11 +1668,25 @@ private void executeOneStaleIndexDelete( // Releasing lock files before deleting the shallow-snap-UUID file because in case of any failure // while releasing the lock file, we would still have the corresponding shallow-snap-UUID file // and that would be used during next delete operation for releasing this stale lock file - RemoteStoreMetadataLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory + RemoteStoreLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory .newLockManager(remoteStoreRepoForIndex, indexUUID, shardBlob.getKey()); remoteStoreMetadataLockManager.release( FileLockInfo.getLockInfoBuilder().withAcquirerId(snapshotUUID).build() ); + if (!isIndexPresent(clusterService, indexUUID)) { + // this is a temporary solution where snapshot deletion triggers remote store side + // cleanup if index is already deleted. We will add a poller in future to take + // care of remote store side cleanup. 
+ // see https://github.com/opensearch-project/OpenSearch/issues/8469 + new RemoteSegmentStoreDirectoryFactory( + remoteStoreLockManagerFactory.getRepositoriesService(), + threadPool + ).newDirectory( + remoteStoreRepoForIndex, + indexUUID, + new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.valueOf(shardBlob.getKey())) + ).close(); + } } } } @@ -1831,6 +1931,16 @@ public long getRestoreThrottleTimeInNanos() { return restoreRateLimitingTimeInNanos.count(); } + @Override + public long getRemoteUploadThrottleTimeInNanos() { + return remoteUploadRateLimitingTimeInNanos.count(); + } + + @Override + public long getRemoteDownloadThrottleTimeInNanos() { + return remoteDownloadRateLimitingTimeInNanos.count(); + } + protected void assertSnapshotOrGenericThread() { assert Thread.currentThread().getName().contains('[' + ThreadPool.Names.SNAPSHOT + ']') || Thread.currentThread().getName().contains('[' + ThreadPool.Names.GENERIC + ']') : "Expected current thread [" @@ -1850,8 +1960,10 @@ public String startVerification() { byte[] testBytes = Strings.toUTF8Bytes(seed); BlobContainer testContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed))); BytesArray bytes = new BytesArray(testBytes); - try (InputStream stream = bytes.streamInput()) { - testContainer.writeBlobAtomic("master.dat", stream, bytes.length(), true); + if (isSystemRepository == false) { + try (InputStream stream = bytes.streamInput()) { + testContainer.writeBlobAtomic("master.dat", stream, bytes.length(), true); + } } return seed; } @@ -2161,6 +2273,11 @@ public boolean isReadOnly() { return readOnly; } + @Override + public boolean isSystemRepository() { + return isSystemRepository; + } + /** * Writing a new index generation is a three step process. * First, the {@link RepositoryMetadata} entry for this repository is set into a pending state by incrementing its @@ -2520,7 +2637,7 @@ private RepositoryMetadata getRepoMetadata(ClusterState state) { * the next version number from when the index blob was written. Each individual index-N blob is * only written once and never overwritten. The highest numbered index-N blob is the latest one * that contains the current snapshots in the repository. - * + *
<p>
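The "highest numbered index-N blob is the latest" rule described above can be illustrated with a short, self-contained sketch; this only demonstrates the naming convention, it is not the latestIndexBlobId() implementation that follows:

```java
import java.util.List;

class IndexGenerationPicker {
    // Returns the numeric suffix of the highest index-N blob, or -1 if none exist
    static long latestGeneration(List<String> blobNames) {
        long latest = -1;
        for (String name : blobNames) {
            if (name.startsWith("index-")) {
                try {
                    latest = Math.max(latest, Long.parseLong(name.substring("index-".length())));
                } catch (NumberFormatException e) {
                    // ignore blobs that merely share the prefix
                }
            }
        }
        return latest;
    }

    public static void main(String[] args) {
        // prints 10: index-10 is the current generation, not index-9
        System.out.println(latestGeneration(List.of("index-9", "index-10", "incompatible-snapshots")));
    }
}
```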
              * Package private for testing */ long latestIndexBlobId() throws IOException { @@ -3110,20 +3227,80 @@ private static ActionListener fileQueueListener( }); } - private static InputStream maybeRateLimit(InputStream stream, Supplier rateLimiterSupplier, CounterMetric metric) { - return new RateLimitingInputStream(stream, rateLimiterSupplier, metric::inc); + private static void mayBeLogRateLimits(BlobStoreTransferContext context, RateLimiter rateLimiter, long time) { + logger.debug( + () -> new ParameterizedMessage( + "Rate limited blob store transfer, context [{}], for duration [{} ms] for configured rate [{} MBps]", + context, + TimeValue.timeValueNanos(time).millis(), + rateLimiter.getMBPerSec() + ) + ); + } + + private static InputStream maybeRateLimit( + InputStream stream, + Supplier rateLimiterSupplier, + CounterMetric metric, + BlobStoreTransferContext context + ) { + return new RateLimitingInputStream(stream, rateLimiterSupplier, (t) -> { + mayBeLogRateLimits(context, rateLimiterSupplier.get(), t); + metric.inc(t); + }); + } + + private static OffsetRangeInputStream maybeRateLimitRemoteTransfers( + OffsetRangeInputStream offsetRangeInputStream, + Supplier rateLimiterSupplier, + CounterMetric metric, + BlobStoreTransferContext context + ) { + return new RateLimitingOffsetRangeInputStream(offsetRangeInputStream, rateLimiterSupplier, (t) -> { + mayBeLogRateLimits(context, rateLimiterSupplier.get(), t); + metric.inc(t); + }); } public InputStream maybeRateLimitRestores(InputStream stream) { return maybeRateLimit( - maybeRateLimit(stream, () -> restoreRateLimiter, restoreRateLimitingTimeInNanos), + maybeRateLimit(stream, () -> restoreRateLimiter, restoreRateLimitingTimeInNanos, BlobStoreTransferContext.SNAPSHOT_RESTORE), recoverySettings::rateLimiter, - restoreRateLimitingTimeInNanos + restoreRateLimitingTimeInNanos, + BlobStoreTransferContext.SNAPSHOT_RESTORE + ); + } + + public OffsetRangeInputStream maybeRateLimitRemoteUploadTransfers(OffsetRangeInputStream offsetRangeInputStream) { + return maybeRateLimitRemoteTransfers( + offsetRangeInputStream, + () -> remoteUploadRateLimiter, + remoteUploadRateLimitingTimeInNanos, + BlobStoreTransferContext.REMOTE_UPLOAD + ); + } + + public InputStream maybeRateLimitRemoteDownloadTransfers(InputStream inputStream) { + return maybeRateLimit( + maybeRateLimit( + inputStream, + () -> remoteDownloadRateLimiter, + remoteDownloadRateLimitingTimeInNanos, + BlobStoreTransferContext.REMOTE_DOWNLOAD + ), + recoverySettings::rateLimiter, + remoteDownloadRateLimitingTimeInNanos, + BlobStoreTransferContext.REMOTE_DOWNLOAD ); } public InputStream maybeRateLimitSnapshots(InputStream stream) { - return maybeRateLimit(stream, () -> snapshotRateLimiter, snapshotRateLimitingTimeInNanos); + return maybeRateLimit(stream, () -> snapshotRateLimiter, snapshotRateLimitingTimeInNanos, BlobStoreTransferContext.SNAPSHOT); + } + + @Override + public List> getRestrictedSystemRepositorySettings() { + return Arrays.asList(SYSTEM_REPOSITORY_SETTING, READONLY_SETTING, REMOTE_STORE_INDEX_SHALLOW_COPY); } @Override @@ -3147,7 +3324,9 @@ public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, In @Override public void verify(String seed, DiscoveryNode localNode) { - assertSnapshotOrGenericThread(); + if (isSystemRepository == false) { + assertSnapshotOrGenericThread(); + } if (isReadOnly()) { try { latestIndexBlobId(); @@ -3172,30 +3351,33 @@ public void verify(String seed, DiscoveryNode localNode) { exp ); } - try (InputStream masterDat = 
testBlobContainer.readBlob("master.dat")) { - final String seedRead = Streams.readFully(masterDat).utf8ToString(); - if (seedRead.equals(seed) == false) { + + if (isSystemRepository == false) { + try (InputStream masterDat = testBlobContainer.readBlob("master.dat")) { + final String seedRead = Streams.readFully(masterDat).utf8ToString(); + if (seedRead.equals(seed) == false) { + throw new RepositoryVerificationException( + metadata.name(), + "Seed read from master.dat was [" + seedRead + "] but expected seed [" + seed + "]" + ); + } + } catch (NoSuchFileException e) { throw new RepositoryVerificationException( metadata.name(), - "Seed read from master.dat was [" + seedRead + "] but expected seed [" + seed + "]" + "a file written by cluster-manager to the store [" + + blobStore() + + "] cannot be accessed on the node [" + + localNode + + "]. " + + "This might indicate that the store [" + + blobStore() + + "] is not shared between this node and the cluster-manager node or " + + "that permissions on the store don't allow reading files written by the cluster-manager node", + e ); + } catch (Exception e) { + throw new RepositoryVerificationException(metadata.name(), "Failed to verify repository", e); } - } catch (NoSuchFileException e) { - throw new RepositoryVerificationException( - metadata.name(), - "a file written by cluster-manager to the store [" - + blobStore() - + "] cannot be accessed on the node [" - + localNode - + "]. " - + "This might indicate that the store [" - + blobStore() - + "] is not shared between this node and the cluster-manager node or " - + "that permissions on the store don't allow reading files written by the cluster-manager node", - e - ); - } catch (Exception e) { - throw new RepositoryVerificationException(metadata.name(), "Failed to verify repository", e); } } } @@ -3292,7 +3474,12 @@ private void writeShardIndexBlobAtomic( () -> new ParameterizedMessage("[{}] Writing shard index [{}] to [{}]", metadata.name(), indexGeneration, shardContainer.path()) ); final String blobName = INDEX_SHARD_SNAPSHOTS_FORMAT.blobName(String.valueOf(indexGeneration)); - writeAtomic(shardContainer, blobName, INDEX_SHARD_SNAPSHOTS_FORMAT.serialize(updatedSnapshots, blobName, compressor), true); + writeAtomic( + shardContainer, + blobName, + INDEX_SHARD_SNAPSHOTS_FORMAT.serialize(updatedSnapshots, blobName, compressor, SNAPSHOT_ONLY_FORMAT_PARAMS), + true + ); } // Unused blobs are all previous index-, data- and meta-blobs and that are not referenced by the new index- as well as all @@ -3486,4 +3673,22 @@ private static final class ShardSnapshotMetaDeleteResult { this.blobsToDelete = blobsToDelete; } } + + enum BlobStoreTransferContext { + REMOTE_UPLOAD("remote_upload"), + REMOTE_DOWNLOAD("remote_download"), + SNAPSHOT("snapshot"), + SNAPSHOT_RESTORE("snapshot_restore"); + + private final String name; + + BlobStoreTransferContext(String name) { + this.name = name; + } + + @Override + public String toString() { + return name; + } + } } diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java index 9048757405108..3e6052a5ef820 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -42,7 +42,11 @@ import org.apache.lucene.util.BytesRef; import org.opensearch.cluster.metadata.Metadata; import 
org.opensearch.common.CheckedFunction; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; +import org.opensearch.common.blobstore.transfer.stream.OffsetRangeIndexInputStream; import org.opensearch.common.io.Streams; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.store.ByteArrayIndexInput; @@ -50,6 +54,7 @@ import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.compress.Compressor; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -58,6 +63,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.gateway.CorruptStateException; +import org.opensearch.index.store.exception.ChecksumCombinationException; import org.opensearch.snapshots.SnapshotInfo; import java.io.IOException; @@ -67,6 +73,8 @@ import java.util.Locale; import java.util.Map; +import static org.opensearch.common.blobstore.transfer.RemoteTransferContainer.checksumOfChecksum; + /** * Snapshot metadata file format used in v2.0 and above * @@ -75,7 +83,7 @@ public final class ChecksumBlobStoreFormat { // Serialization parameters to specify correct context for metadata serialization - private static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; + public static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; static { Map snapshotOnlyParams = new HashMap<>(); @@ -162,12 +170,126 @@ public T deserialize(String blobName, NamedXContentRegistry namedXContentRegistr * @param compressor whether to use compression */ public void write(final T obj, final BlobContainer blobContainer, final String name, final Compressor compressor) throws IOException { + write(obj, blobContainer, name, compressor, SNAPSHOT_ONLY_FORMAT_PARAMS); + } + + /** + * Writes blob with resolving the blob name using {@link #blobName} method. + *
<p>
+ * The blob will optionally be compressed. + * + * @param obj object to be serialized + * @param blobContainer blob container + * @param name blob name + * @param compressor whether to use compression + * @param params ToXContent params + */ + public void write( + final T obj, + final BlobContainer blobContainer, + final String name, + final Compressor compressor, + final ToXContent.Params params + ) throws IOException { final String blobName = blobName(name); - final BytesReference bytes = serialize(obj, blobName, compressor); + final BytesReference bytes = serialize(obj, blobName, compressor, params); blobContainer.writeBlob(blobName, bytes.streamInput(), bytes.length(), false); } - public BytesReference serialize(final T obj, final String blobName, final Compressor compressor) throws IOException { + /** + * Internally calls {@link #writeAsyncWithPriority} with {@link WritePriority#NORMAL} + */ + public void writeAsync( + final T obj, + final BlobContainer blobContainer, + final String name, + final Compressor compressor, + ActionListener listener, + final ToXContent.Params params + ) throws IOException { + // use NORMAL priority by default + this.writeAsyncWithPriority(obj, blobContainer, name, compressor, WritePriority.NORMAL, listener, params); + } + + /** + * Internally calls {@link #writeAsyncWithPriority} with {@link WritePriority#URGENT} + *
<p>
              + * NOTE: We use this method to upload urgent priority objects like cluster state to remote stores. + * Use {@link #writeAsync(ToXContent, BlobContainer, String, Compressor, ActionListener, ToXContent.Params)} for + * other use cases. + */ + public void writeAsyncWithUrgentPriority( + final T obj, + final BlobContainer blobContainer, + final String name, + final Compressor compressor, + ActionListener listener, + final ToXContent.Params params + ) throws IOException { + this.writeAsyncWithPriority(obj, blobContainer, name, compressor, WritePriority.URGENT, listener, params); + } + + /** + * Method to writes blob with resolving the blob name using {@link #blobName} method with specified + * {@link WritePriority}. Leverages the multipart upload if supported by the blobContainer. + * + * @param obj object to be serialized + * @param blobContainer blob container + * @param name blob name + * @param compressor whether to use compression + * @param priority write priority to be used + * @param listener listener to listen to write result + * @param params ToXContent params + */ + private void writeAsyncWithPriority( + final T obj, + final BlobContainer blobContainer, + final String name, + final Compressor compressor, + final WritePriority priority, + ActionListener listener, + final ToXContent.Params params + ) throws IOException { + if (blobContainer instanceof AsyncMultiStreamBlobContainer == false) { + write(obj, blobContainer, name, compressor, params); + listener.onResponse(null); + return; + } + final String blobName = blobName(name); + final BytesReference bytes = serialize(obj, blobName, compressor, params); + final String resourceDescription = "ChecksumBlobStoreFormat.writeAsyncWithPriority(blob=\"" + blobName + "\")"; + try (IndexInput input = new ByteArrayIndexInput(resourceDescription, BytesReference.toBytes(bytes))) { + long expectedChecksum; + try { + expectedChecksum = checksumOfChecksum(input.clone(), 8); + } catch (Exception e) { + throw new ChecksumCombinationException( + "Potentially corrupted file: Checksum combination failed while combining stored checksum " + + "and calculated checksum of stored checksum", + resourceDescription, + e + ); + } + + try ( + RemoteTransferContainer remoteTransferContainer = new RemoteTransferContainer( + blobName, + blobName, + bytes.length(), + true, + priority, + (size, position) -> new OffsetRangeIndexInputStream(input, size, position), + expectedChecksum, + ((AsyncMultiStreamBlobContainer) blobContainer).remoteIntegrityCheckSupported() + ) + ) { + ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload(remoteTransferContainer.createWriteContext(), listener); + } + } + } + + public BytesReference serialize(final T obj, final String blobName, final Compressor compressor, final ToXContent.Params params) + throws IOException { try (BytesStreamOutput outputStream = new BytesStreamOutput()) { try ( OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput( @@ -191,7 +313,7 @@ public void close() throws IOException { ) ) { builder.startObject(); - obj.toXContent(builder, SNAPSHOT_ONLY_FORMAT_PARAMS); + obj.toXContent(builder, params); builder.endObject(); } CodecUtil.writeFooter(indexOutput); diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java index 54f226e81025e..0651ff586d412 100644 --- 
a/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/MeteredBlobStoreRepository.java @@ -34,12 +34,10 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.UUIDs; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.repositories.RepositoryInfo; import org.opensearch.repositories.RepositoryStatsSnapshot; -import org.opensearch.threadpool.ThreadPool; import java.util.Map; @@ -53,29 +51,24 @@ public abstract class MeteredBlobStoreRepository extends BlobStoreRepository { public MeteredBlobStoreRepository( RepositoryMetadata metadata, - boolean compress, NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, RecoverySettings recoverySettings, Map location ) { - super(metadata, compress, namedXContentRegistry, clusterService, recoverySettings); - ThreadPool threadPool = clusterService.getClusterApplierService().threadPool(); - this.repositoryInfo = new RepositoryInfo( - UUIDs.randomBase64UUID(), - metadata.name(), - metadata.type(), - location, - threadPool.absoluteTimeInMillis() - ); + super(metadata, namedXContentRegistry, clusterService, recoverySettings); + this.repositoryInfo = new RepositoryInfo(metadata.name(), metadata.type(), location); } - public RepositoryStatsSnapshot statsSnapshot() { - return new RepositoryStatsSnapshot(repositoryInfo, stats(), RepositoryStatsSnapshot.UNKNOWN_CLUSTER_VERSION, false); + @Override + public void reload(RepositoryMetadata repositoryMetadata) { + super.reload(repositoryMetadata); + + // Not adding any additional reload logic here is intentional as the constructor only + // initializes the repositoryInfo from the repo metadata, which cannot be changed. } - public RepositoryStatsSnapshot statsSnapshotForArchival(long clusterVersion) { - RepositoryInfo stoppedRepoInfo = repositoryInfo.stopped(threadPool.absoluteTimeInMillis()); - return new RepositoryStatsSnapshot(stoppedRepoInfo, stats(), clusterVersion, true); + public RepositoryStatsSnapshot statsSnapshot() { + return new RepositoryStatsSnapshot(repositoryInfo, stats(), RepositoryStatsSnapshot.UNKNOWN_CLUSTER_VERSION); } } diff --git a/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java b/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java index 3009466f03635..4a9a91336ec1d 100644 --- a/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java +++ b/server/src/main/java/org/opensearch/repositories/fs/FsRepository.java @@ -50,6 +50,8 @@ import org.opensearch.repositories.blobstore.BlobStoreRepository; import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; import java.util.function.Function; /** @@ -61,7 +63,6 @@ *

<dt>{@code concurrent_streams}</dt><dd>Number of concurrent read/write streams (per repository on each node). Defaults to 5.</dd>
 * <dt>{@code chunk_size}</dt><dd>Large file can be divided into chunks. This parameter specifies the chunk size.
 * Defaults to not chunked.</dd>
- * <dt>{@code compress}</dt><dd>If set to true metadata files will be stored compressed. Defaults to false.</dd>
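For reference, the settings documented in this javadoc map onto RepositoryMetadata as sketched below. The setting keys are real; the values are purely illustrative, and note that per this change compress is now read by BlobStoreRepository itself rather than computed in FsRepository:

```java
import org.opensearch.cluster.metadata.RepositoryMetadata;
import org.opensearch.common.settings.Settings;

class FsRepoSettingsExample {
    static RepositoryMetadata fsRepoMetadata() {
        Settings settings = Settings.builder()
            .put("location", "/mnt/backups/my-fs-repo") // mandatory repository root
            .put("chunk_size", "512mb")                 // optional: split large files into chunks
            .put("compress", true)                      // metadata compression, handled by the base class
            .build();
        return new RepositoryMetadata("my-fs-repo", "fs", settings);
    }
}
```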
              * * * @opensearch.internal @@ -101,11 +102,11 @@ public class FsRepository extends BlobStoreRepository { public static final Setting BASE_PATH_SETTING = Setting.simpleString("base_path"); - private final Environment environment; + protected final Environment environment; - private ByteSizeValue chunkSize; + protected ByteSizeValue chunkSize; - private final BlobPath basePath; + protected BlobPath basePath; /** * Constructs a shared file system repository. @@ -117,8 +118,27 @@ public FsRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - super(metadata, calculateCompress(metadata, environment), namedXContentRegistry, clusterService, recoverySettings); + super(metadata, namedXContentRegistry, clusterService, recoverySettings); this.environment = environment; + validateLocation(); + readMetadata(); + } + + protected void readMetadata() { + if (CHUNK_SIZE_SETTING.exists(metadata.settings())) { + this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); + } else { + this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(environment.settings()); + } + final String basePath = BASE_PATH_SETTING.get(metadata.settings()); + if (Strings.hasLength(basePath)) { + this.basePath = new BlobPath().add(basePath); + } else { + this.basePath = BlobPath.cleanPath(); + } + } + + protected void validateLocation() { String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings()); if (location.isEmpty()) { logger.warn( @@ -151,24 +171,6 @@ public FsRepository( ); } } - - if (CHUNK_SIZE_SETTING.exists(metadata.settings())) { - this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); - } else { - this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(environment.settings()); - } - final String basePath = BASE_PATH_SETTING.get(metadata.settings()); - if (Strings.hasLength(basePath)) { - this.basePath = new BlobPath().add(basePath); - } else { - this.basePath = BlobPath.cleanPath(); - } - } - - private static boolean calculateCompress(RepositoryMetadata metadata, Environment environment) { - return COMPRESS_SETTING.exists(metadata.settings()) - ? COMPRESS_SETTING.get(metadata.settings()) - : REPOSITORIES_COMPRESS_SETTING.get(environment.settings()); } @Override @@ -187,4 +189,12 @@ protected ByteSizeValue chunkSize() { public BlobPath basePath() { return basePath; } + + @Override + public List> getRestrictedSystemRepositorySettings() { + List> restrictedSettings = new ArrayList<>(); + restrictedSettings.addAll(super.getRestrictedSystemRepositorySettings()); + restrictedSettings.add(LOCATION_SETTING); + return restrictedSettings; + } } diff --git a/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java b/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java new file mode 100644 index 0000000000000..c06c805a39396 --- /dev/null +++ b/server/src/main/java/org/opensearch/repositories/fs/ReloadableFsRepository.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.repositories.fs; + +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.indices.recovery.RecoverySettings; + +/** + * Extension of {@link FsRepository} that can be reloaded inplace + * + * @opensearch.internal + */ +public class ReloadableFsRepository extends FsRepository { + /** + * Constructs a shared file system repository that is reloadable in-place. + */ + public ReloadableFsRepository( + RepositoryMetadata metadata, + Environment environment, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + super(metadata, environment, namedXContentRegistry, clusterService, recoverySettings); + } + + @Override + public boolean isReloadable() { + return true; + } + + @Override + public void reload(RepositoryMetadata repositoryMetadata) { + if (isReloadable() == false) { + return; + } + + super.reload(repositoryMetadata); + validateLocation(); + readMetadata(); + } +} diff --git a/server/src/main/java/org/opensearch/rest/RestController.java b/server/src/main/java/org/opensearch/rest/RestController.java index ac30f999d0da7..cc48b59699a17 100644 --- a/server/src/main/java/org/opensearch/rest/RestController.java +++ b/server/src/main/java/org/opensearch/rest/RestController.java @@ -524,7 +524,7 @@ private void handleBadRequest(String uri, RestRequest.Method method, RestChannel /** * Attempts to extract auth token and login. * - * @returns false if there was an error and the request should not continue being dispatched + * @return false if there was an error and the request should not continue being dispatched * */ private boolean handleAuthenticateUser(final RestRequest request, final RestChannel channel) { try { diff --git a/server/src/main/java/org/opensearch/rest/RestHandler.java b/server/src/main/java/org/opensearch/rest/RestHandler.java index 7832649e8ad32..294dc3ffbe329 100644 --- a/server/src/main/java/org/opensearch/rest/RestHandler.java +++ b/server/src/main/java/org/opensearch/rest/RestHandler.java @@ -108,7 +108,7 @@ default List replacedRoutes() { } /** - * Controls whether requests handled by this class are allowed to to access system indices by default. + * Controls whether requests handled by this class are allowed to access system indices by default. * @return {@code true} if requests handled by this class should be allowed to access system indices. 
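Taken together, the isReloadable(), validateMetadata() and reload() hooks introduced in this change give callers a way to apply updated repository settings without tearing the repository down, and ReloadableFsRepository opts in by returning true. A sketch of how a caller might drive that contract; Repository and RepositoryMetadata are the real types, while applyUpdate is a hypothetical helper:

```java
import org.opensearch.cluster.metadata.RepositoryMetadata;
import org.opensearch.repositories.Repository;

class RepositoryUpdater {
    static boolean applyUpdate(Repository repository, RepositoryMetadata newMetadata) {
        if (repository.isReloadable() == false) {
            return false; // caller falls back to close-and-recreate
        }
        repository.validateMetadata(newMetadata); // fail fast on bad settings
        repository.reload(newMetadata);           // in-place swap of derived values
        return true;
    }
}
```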
*/ default boolean allowSystemIndexAccessByDefault() { diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationAction.java index ae00344bce446..cba42156deead 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationAction.java @@ -170,7 +170,7 @@ public Table buildSegmentReplicationTable(RestRequest request, SegmentReplicatio t.addCell(state.getTargetNode().getHostName()); t.addCell(shardStats.getCheckpointsBehindCount()); t.addCell(new ByteSizeValue(shardStats.getBytesBehindCount())); - t.addCell(new TimeValue(shardStats.getCurrentReplicationTimeMillis())); + t.addCell(new TimeValue(shardStats.getCurrentReplicationLagMillis())); t.addCell(new TimeValue(shardStats.getLastCompletedReplicationTimeMillis())); t.addCell(perGroupStats.getRejectedRequestCount()); if (detailed) { diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java index 2781c09de8614..dea4f02aef5bd 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java @@ -53,6 +53,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; import org.opensearch.core.common.Strings; @@ -574,6 +575,31 @@ protected Table getTableWithHeader(final RestRequest request) { "sibling:pri;alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops" ); table.addCell("pri.search.query_total", "default:false;text-align:right;desc:total query phase ops"); + if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { + table.addCell( + "search.concurrent_query_current", + "sibling:pri;alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" + ); + table.addCell("pri.search.concurrent_query_current", "default:false;text-align:right;desc:current concurrent query phase ops"); + + table.addCell( + "search.concurrent_query_time", + "sibling:pri;alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" + ); + table.addCell("pri.search.concurrent_query_time", "default:false;text-align:right;desc:time spent in concurrent query phase"); + + table.addCell( + "search.concurrent_query_total", + "sibling:pri;alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total query phase ops" + ); + table.addCell("pri.search.concurrent_query_total", "default:false;text-align:right;desc:total query phase ops"); + + table.addCell( + "search.concurrent_avg_slice_count", + "sibling:pri;alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query concurrency" + ); + table.addCell("pri.search.concurrent_avg_slice_count", "default:false;text-align:right;desc:average query concurrency"); + } table.addCell( "search.scroll_current", @@ -883,6 +909,20 @@ Table buildTable( table.addCell(totalStats.getSearch() == null ? 
null : totalStats.getSearch().getTotal().getQueryCount()); table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getQueryCount()); + if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryCurrent()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryCurrent()); + + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryTime()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryTime()); + + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentQueryCount()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentQueryCount()); + + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getConcurrentAvgSliceCount()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getConcurrentAvgSliceCount()); + } + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getScrollCurrent()); table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getScrollCurrent()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index da4bd9d71732e..bf8b27f378311 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -47,6 +47,7 @@ import org.opensearch.common.Table; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.network.NetworkAddress; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.unit.ByteSizeValue; @@ -303,6 +304,24 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("search.query_current", "alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops"); table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase"); table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops"); + if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { + table.addCell( + "search.concurrent_query_current", + "alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" + ); + table.addCell( + "search.concurrent_query_time", + "alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" + ); + table.addCell( + "search.concurrent_query_total", + "alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total concurrent query phase ops" + ); + table.addCell( + "search.concurrent_avg_slice_count", + "alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query concurrency" + ); + } table.addCell("search.scroll_current", "alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll contexts"); table.addCell( 
"search.scroll_time", @@ -529,6 +548,12 @@ Table buildTable( table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCurrent()); table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryTime()); table.addCell(searchStats == null ? null : searchStats.getTotal().getQueryCount()); + if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { + table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentQueryCurrent()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentQueryTime()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentQueryCount()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getConcurrentAvgSliceCount()); + } table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCurrent()); table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollTime()); table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCount()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestPitSegmentsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestPitSegmentsAction.java index ba9606e8eb444..5fc6c961b4637 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestPitSegmentsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestPitSegmentsAction.java @@ -120,23 +120,7 @@ private Table buildTable(final RestRequest request, Map i Table table = getTableWithHeader(request); DiscoveryNodes nodes = this.nodesInCluster.get(); - table.startRow(); - table.addCell("index", "default:true;alias:i,idx;desc:index name"); - table.addCell("shard", "default:true;alias:s,sh;desc:shard name"); - table.addCell("prirep", "alias:p,pr,primaryOrReplica;default:true;desc:primary or replica"); - table.addCell("ip", "default:true;desc:ip of node where it lives"); - table.addCell("id", "default:false;desc:unique id of node where it lives"); - table.addCell("segment", "default:true;alias:seg;desc:segment name"); - table.addCell("generation", "default:true;alias:g,gen;text-align:right;desc:segment generation"); - table.addCell("docs.count", "default:true;alias:dc,docsCount;text-align:right;desc:number of docs in segment"); - table.addCell("docs.deleted", "default:true;alias:dd,docsDeleted;text-align:right;desc:number of deleted docs in segment"); - table.addCell("size", "default:true;alias:si;text-align:right;desc:segment size in bytes"); - table.addCell("size.memory", "default:true;alias:sm,sizeMemory;text-align:right;desc:segment memory in bytes"); - table.addCell("committed", "default:true;alias:ic,isCommitted;desc:is segment committed"); - table.addCell("searchable", "default:true;alias:is,isSearchable;desc:is segment searched"); - table.addCell("version", "default:true;alias:v,ver;desc:version"); - table.addCell("compound", "default:true;alias:ico,isCompound;desc:is segment compound"); - table.endRow(); + for (IndexSegments indexSegments : indicesSegments.values()) { Map shards = indexSegments.getShards(); for (IndexShardSegments indexShardSegments : shards.values()) { diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java index cadc6ef730350..06b31270f7c65 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java @@ -44,6 +44,7 @@ import 
org.opensearch.common.Table; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; import org.opensearch.index.cache.query.QueryCacheStats; import org.opensearch.index.engine.CommitStats; @@ -219,6 +220,24 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("search.query_current", "alias:sqc,searchQueryCurrent;default:false;text-align:right;desc:current query phase ops"); table.addCell("search.query_time", "alias:sqti,searchQueryTime;default:false;text-align:right;desc:time spent in query phase"); table.addCell("search.query_total", "alias:sqto,searchQueryTotal;default:false;text-align:right;desc:total query phase ops"); + if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { + table.addCell( + "search.concurrent_query_current", + "alias:scqc,searchConcurrentQueryCurrent;default:false;text-align:right;desc:current concurrent query phase ops" + ); + table.addCell( + "search.concurrent_query_time", + "alias:scqti,searchConcurrentQueryTime;default:false;text-align:right;desc:time spent in concurrent query phase" + ); + table.addCell( + "search.concurrent_query_total", + "alias:scqto,searchConcurrentQueryTotal;default:false;text-align:right;desc:total concurrent query phase ops" + ); + table.addCell( + "search.concurrent_avg_slice_count", + "alias:casc,searchConcurrentAvgSliceCount;default:false;text-align:right;desc:average query concurrency" + ); + } table.addCell("search.scroll_current", "alias:scc,searchScrollCurrent;default:false;text-align:right;desc:open scroll contexts"); table.addCell( "search.scroll_time", @@ -399,6 +418,13 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getQueryCurrent())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getQueryTime())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getQueryCount())); + if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryCurrent())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryTime())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentQueryCount())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getConcurrentAvgSliceCount())); + + } table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCurrent())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollTime())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCount())); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestThreadPoolAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestThreadPoolAction.java index f71001f55139a..7a6ff856fd2e9 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestThreadPoolAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestThreadPoolAction.java @@ -163,6 +163,10 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("rejected", "alias:r;default:true;text-align:right;desc:number of rejected tasks"); table.addCell("largest", 
"alias:l;default:false;text-align:right;desc:highest number of seen active threads"); table.addCell("completed", "alias:c;default:false;text-align:right;desc:number of completed tasks"); + table.addCell( + "total_wait_time", + "alias:twt;default:false;text-align:right;desc:total time tasks spent waiting in thread_pool queue" + ); table.addCell("core", "alias:cr;default:false;text-align:right;desc:core number of threads in a scaling thread pool"); table.addCell("max", "alias:mx;default:false;text-align:right;desc:maximum number of threads in a scaling thread pool"); table.addCell("size", "alias:sz;default:false;text-align:right;desc:number of threads in a fixed thread pool"); @@ -267,6 +271,7 @@ private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoR table.addCell(poolStats == null ? null : poolStats.getRejected()); table.addCell(poolStats == null ? null : poolStats.getLargest()); table.addCell(poolStats == null ? null : poolStats.getCompleted()); + table.addCell(poolStats == null ? null : poolStats.getWaitTime()); table.addCell(core); table.addCell(max); table.addCell(size); diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java index ebfd082d974fd..080366e536da1 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java @@ -180,6 +180,12 @@ public static void parseSearchRequest( searchRequest.allowPartialSearchResults(request.paramAsBoolean("allow_partial_search_results", null)); } + if (request.hasParam("phase_took")) { + // only set if we have the parameter passed to override the cluster-level default + // else phaseTook = null + searchRequest.setPhaseTook(request.paramAsBoolean("phase_took", true)); + } + // do not allow 'query_and_fetch' or 'dfs_query_and_fetch' search types // from the REST layer. these modes are an internal optimization and should // not be specified explicitly by the user. diff --git a/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java b/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java index f71fdeadfe8b0..51f3a64846f4d 100644 --- a/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java +++ b/server/src/main/java/org/opensearch/script/ScoreScriptUtils.java @@ -49,7 +49,6 @@ import static org.opensearch.common.util.BitMixer.mix32; import static org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory.TermFrequencyFunctionName.SUM_TOTAL_TERM_FREQ; import static org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory.TermFrequencyFunctionName.TERM_FREQ; -import static org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory.TermFrequencyFunctionName.TF; import static org.opensearch.index.query.functionscore.TermFrequencyFunctionFactory.TermFrequencyFunctionName.TOTAL_TERM_FREQ; /** @@ -94,27 +93,6 @@ public int termFreq(String field, String term) { } } - /** - * Calculates the term frequency-inverse document frequency (tf-idf) for a specific term within a field. 
- * - * @opensearch.internal - */ - public static final class TF { - private final ScoreScript scoreScript; - - public TF(ScoreScript scoreScript) { - this.scoreScript = scoreScript; - } - - public float tf(String field, String term) { - try { - return (float) scoreScript.getTermFrequency(TF, field, term); - } catch (Exception e) { - throw ExceptionsHelper.convertToOpenSearchException(e); - } - } - } - /** * Retrieves the total term frequency within a field for a specific term. * @@ -364,11 +342,14 @@ public double decayNumericGauss(double docValue) { /** * Limitations: since script functions don't have access to DateFieldMapper, * decay functions on dates are limited to dates in the default format and default time zone, + * Further, since script module gets initialized before the featureflags are loaded, + * we cannot use the feature flag to gate the usage of the new default date format. * Also, using calculations with now are not allowed. * */ private static final ZoneId defaultZoneId = ZoneId.of("UTC"); - private static final DateMathParser dateParser = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser(); + // ToDo: use new default date formatter once feature flag is removed + private static final DateMathParser dateParser = DateFieldMapper.LEGACY_DEFAULT_DATE_TIME_FORMATTER.toDateMathParser(); /** * Linear date decay diff --git a/server/src/main/java/org/opensearch/script/Script.java b/server/src/main/java/org/opensearch/script/Script.java index ed88737a5b87e..9e74314c281cd 100644 --- a/server/src/main/java/org/opensearch/script/Script.java +++ b/server/src/main/java/org/opensearch/script/Script.java @@ -33,6 +33,7 @@ package org.opensearch.script; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.LoggingDeprecationHandler; @@ -69,9 +70,9 @@ * {@link Script} represents used-defined input that can be used to * compile and execute a script from the {@link ScriptService} * based on the {@link ScriptType}. - * + *
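The `phase_took` handling added to RestSearchAction above is deliberately tri-state: when the URL parameter is absent, `phaseTook` stays null and the cluster-level default applies; when present, the explicit value wins. A minimal sketch of that pattern, using a hypothetical map-based stand-in for `RestRequest`:

```java
import java.util.Map;

// Hypothetical, simplified stand-in for the RestRequest handling shown above.
final class PhaseTookParamSketch {
    /** Returns null when the parameter is absent, so callers fall back to the cluster default. */
    static Boolean phaseTook(Map<String, String> params) {
        String raw = params.get("phase_took");
        return raw == null ? null : Boolean.parseBoolean(raw);
    }
}
```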
<p>
              * There are three types of scripts specified by {@link ScriptType}. - * + *
<p>
              * The following describes the expected parameters for each type of script: * *
<p>
                @@ -96,8 +97,9 @@ *
              *
            * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class Script implements ToXContentObject, Writeable { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(Script.class); @@ -346,16 +348,16 @@ public static Script parse(Settings settings) { /** * This will parse XContent into a {@link Script}. The following formats can be parsed: - * + *
<p>
            * The simple format defaults to an {@link ScriptType#INLINE} with no compiler options or user-defined params: - * + *
<p>
            * Example: * {@code * "return Math.log(doc.popularity) * 100;" * } * * The complex format where {@link ScriptType} and idOrCode are required while lang, options and params are not required. - * + *
<p>
            * {@code * { * // Exactly one of "id" or "source" must be specified @@ -389,7 +391,7 @@ public static Script parse(Settings settings) { * * This also handles templates in a special way. If a complexly formatted query is specified as another complex * JSON object the query is assumed to be a template, and the format will be preserved. - * + *
<p>
            * {@code * { * "source" : { "query" : ... }, @@ -603,7 +605,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * This will build scripts into the following XContent structure: - * + *
<p>
            * {@code * { * "<(id, source)>" : "", @@ -633,10 +635,10 @@ public void writeTo(StreamOutput out) throws IOException { * } * * Note that lang, options, and params will only be included if there have been any specified. - * + *
<p>
            * This also handles templates in a special way. If the {@link Script#CONTENT_TYPE_OPTION} option * is provided and the {@link ScriptType#INLINE} is specified then the template will be preserved as a raw field. - * + *
<p>
            * {@code * { * "source" : { "query" : ... }, diff --git a/server/src/main/java/org/opensearch/script/ScriptCache.java b/server/src/main/java/org/opensearch/script/ScriptCache.java index 3c50fc12dcacb..fb57e7cdfa5bd 100644 --- a/server/src/main/java/org/opensearch/script/ScriptCache.java +++ b/server/src/main/java/org/opensearch/script/ScriptCache.java @@ -158,7 +158,7 @@ public ScriptContextStats stats(String context) { /** * Check whether there have been too many compilations within the last minute, throwing a circuit breaking exception if so. * This is a variant of the token bucket algorithm: https://en.wikipedia.org/wiki/Token_bucket - * + *
<p>
            * It can be thought of as a bucket with water, every time the bucket is checked, water is added proportional to the amount of time that * elapsed since the last time it was checked. If there is enough water, some is removed and the request is allowed. If there is not * enough water the request is denied. Just like a normal bucket, if water is added that overflows the bucket, the extra water/capacity diff --git a/server/src/main/java/org/opensearch/script/ScriptContext.java b/server/src/main/java/org/opensearch/script/ScriptContext.java index 27ad1f3ce03c8..2180b6059dbef 100644 --- a/server/src/main/java/org/opensearch/script/ScriptContext.java +++ b/server/src/main/java/org/opensearch/script/ScriptContext.java @@ -32,6 +32,7 @@ package org.opensearch.script; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.collect.Tuple; import org.opensearch.common.unit.TimeValue; @@ -39,7 +40,7 @@ /** * The information necessary to compile and run a script. - * + *
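The ScriptCache comment above describes the compilation throttle as a token-bucket variant. A compact, self-contained sketch of that check, with assumed names and units rather than the actual ScriptCache fields:

```java
// Time refills the bucket, each compilation drains one token, and an empty bucket
// means "deny" (ScriptCache throws a circuit-breaking exception instead of returning false).
final class TokenBucketSketch {
    private final double ratePerNano;   // tokens ("water") added per elapsed nanosecond
    private final double maxTokens;     // bucket capacity; extra water overflows and is lost
    private double tokens;
    private long lastCheck;

    TokenBucketSketch(double tokensPerSecond, double maxTokens) {
        this.ratePerNano = tokensPerSecond / 1_000_000_000.0;
        this.maxTokens = maxTokens;
        this.tokens = maxTokens;
        this.lastCheck = System.nanoTime();
    }

    synchronized boolean tryAcquire() {
        long now = System.nanoTime();
        tokens = Math.min(maxTokens, tokens + (now - lastCheck) * ratePerNano);
        lastCheck = now;
        if (tokens >= 1.0) {
            tokens -= 1.0;  // enough water: remove some and allow the request
            return true;
        }
        return false;       // not enough water: deny the request
    }
}
```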
<p>
            * A {@link ScriptContext} contains the information related to a single use case and the interfaces * and methods necessary for a {@link ScriptEngine} to implement. *
<p>
            @@ -70,8 +71,9 @@ * If the variable name starts with an underscore, for example, {@code _score}, the needs method would * be {@code boolean needs_score()}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public final class ScriptContext { /** A unique identifier for this context. */ diff --git a/server/src/main/java/org/opensearch/script/ScriptEngine.java b/server/src/main/java/org/opensearch/script/ScriptEngine.java index 418fbed52da30..560727bc8fa97 100644 --- a/server/src/main/java/org/opensearch/script/ScriptEngine.java +++ b/server/src/main/java/org/opensearch/script/ScriptEngine.java @@ -32,6 +32,8 @@ package org.opensearch.script; +import org.opensearch.common.annotation.PublicApi; + import java.io.Closeable; import java.io.IOException; import java.util.Map; @@ -40,8 +42,9 @@ /** * A script language implementation. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public interface ScriptEngine extends Closeable { /** diff --git a/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java b/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java index ef64959f99a3c..30756ff702e8f 100644 --- a/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java +++ b/server/src/main/java/org/opensearch/script/ScriptLanguagesInfo.java @@ -56,11 +56,11 @@ /** * The allowable types, languages and their corresponding contexts. When serialized there is a top level types_allowed list, * meant to reflect the setting script.allowed_types with the allowed types (eg inline, stored). - * + *
<p>
            * The top-level language_contexts list of objects have the language (eg. painless, * mustache) and a list of contexts available for the language. It is the responsibility of the caller to ensure * these contexts are filtered by the script.allowed_contexts setting. - * + *
<p>
            * The json serialization of the object has the form: * * { diff --git a/server/src/main/java/org/opensearch/script/ScriptMetadata.java b/server/src/main/java/org/opensearch/script/ScriptMetadata.java index 5f529fccd213c..fd92d8f7f02db 100644 --- a/server/src/main/java/org/opensearch/script/ScriptMetadata.java +++ b/server/src/main/java/org/opensearch/script/ScriptMetadata.java @@ -183,9 +183,9 @@ static ScriptMetadata deleteStoredScript(ScriptMetadata previous, String id) { /** * This will parse XContent into {@link ScriptMetadata}. - * + *
<p>
            * The following format will be parsed: - * + *
<p>
            * {@code * { * "" : "<{@link StoredScriptSource#fromXContent(XContentParser, boolean)}>", @@ -356,7 +356,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * This will write XContent from {@link ScriptMetadata}. The following format will be written: - * + *
<p>
            * {@code * { * "" : "<{@link StoredScriptSource#toXContent(XContentBuilder, Params)}>", diff --git a/server/src/main/java/org/opensearch/script/ScriptService.java b/server/src/main/java/org/opensearch/script/ScriptService.java index f0e6bd5d54422..d3c8861dbc5d7 100644 --- a/server/src/main/java/org/opensearch/script/ScriptService.java +++ b/server/src/main/java/org/opensearch/script/ScriptService.java @@ -46,6 +46,7 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; @@ -75,8 +76,9 @@ /** * Service for scripting * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ScriptService implements Closeable, ClusterStateApplier { private static final Logger logger = LogManager.getLogger(ScriptService.class); diff --git a/server/src/main/java/org/opensearch/script/ScriptType.java b/server/src/main/java/org/opensearch/script/ScriptType.java index 5f505c781bd0a..c39edcbcb12c4 100644 --- a/server/src/main/java/org/opensearch/script/ScriptType.java +++ b/server/src/main/java/org/opensearch/script/ScriptType.java @@ -32,6 +32,7 @@ package org.opensearch.script; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -45,8 +46,9 @@ * It's also used to by {@link ScriptService} to determine whether or not a {@link Script} is * allowed to be executed based on both default and user-defined settings. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public enum ScriptType implements Writeable { /** diff --git a/server/src/main/java/org/opensearch/script/StoredScriptSource.java b/server/src/main/java/org/opensearch/script/StoredScriptSource.java index 0ff44b3af890a..d1dae67d0e55f 100644 --- a/server/src/main/java/org/opensearch/script/StoredScriptSource.java +++ b/server/src/main/java/org/opensearch/script/StoredScriptSource.java @@ -308,7 +308,7 @@ public static StoredScriptSource parse(BytesReference content, MediaType mediaTy /** * This will parse XContent into a {@link StoredScriptSource}. The following format is what will be parsed: - * + *
<p>
            * {@code * { * "script" : { @@ -387,7 +387,7 @@ public void writeTo(StreamOutput out) throws IOException { /** * This will write XContent from a {@link StoredScriptSource}. The following format will be written: - * + *
<p>
            * {@code * { * "script" : { diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index f64c17e873596..c229c3924688d 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -45,6 +45,7 @@ import org.opensearch.action.search.SearchType; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; +import org.opensearch.common.SetOnce; import org.opensearch.common.lease.Releasables; import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.unit.TimeValue; @@ -183,7 +184,8 @@ final class DefaultSearchContext extends SearchContext { private final QueryShardContext queryShardContext; private final FetchPhase fetchPhase; private final Function requestToAggReduceContextBuilder; - private final boolean useConcurrentSearch; + private final boolean concurrentSearchSettingsEnabled; + private final SetOnce requestShouldUseConcurrentSearch = new SetOnce<>(); DefaultSearchContext( ReaderContext readerContext, @@ -214,14 +216,14 @@ final class DefaultSearchContext extends SearchContext { this.indexShard = readerContext.indexShard(); this.clusterService = clusterService; this.engineSearcher = readerContext.acquireSearcher("search"); - this.useConcurrentSearch = useConcurrentSearch(executor); + this.concurrentSearchSettingsEnabled = evaluateConcurrentSegmentSearchSettings(executor); this.searcher = new ContextIndexSearcher( engineSearcher.getIndexReader(), engineSearcher.getSimilarity(), engineSearcher.getQueryCache(), engineSearcher.getQueryCachingPolicy(), lowLevelCancellation, - useConcurrentSearch ? executor : null, + concurrentSearchSettingsEnabled ? executor : null, this ); this.relativeTimeSupplier = relativeTimeSupplier; @@ -876,11 +878,29 @@ public Profilers getProfilers() { } /** - * Returns concurrent segment search status for the search context + * Returns concurrent segment search status for the search context. This should only be used after request parsing, during which requestShouldUseConcurrentSearch will be set. 
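The parse-then-read contract in that javadoc condenses to a small write-once handshake; below is a hypothetical wrapper around the patch's actual fields (`org.opensearch.common.SetOnce` is the holder the patch imports):

```java
import org.opensearch.common.SetOnce;

// Hypothetical condensation of DefaultSearchContext's concurrent-search decision.
final class ConcurrentSearchDecisionSketch {
    private final boolean settingsEnabled;                            // node/cluster level switch, fixed at construction
    private final SetOnce<Boolean> requestSupports = new SetOnce<>(); // written once during request parsing

    ConcurrentSearchDecisionSketch(boolean settingsEnabled) {
        this.settingsEnabled = settingsEnabled;
    }

    /** Called exactly once, at the end of request parsing (see the parseSource change below). */
    void evaluate(boolean requestLevelSupport) {
        requestSupports.set(requestLevelSupport);
    }

    boolean shouldUseConcurrentSearch() {
        assert requestSupports.get() != null : "evaluate() must run during request parsing first";
        return settingsEnabled && Boolean.TRUE.equals(requestSupports.get());
    }
}
```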
*/ @Override - public boolean isConcurrentSegmentSearchEnabled() { - return useConcurrentSearch; + public boolean shouldUseConcurrentSearch() { + assert requestShouldUseConcurrentSearch.get() != null : "requestShouldUseConcurrentSearch must be set"; + return concurrentSearchSettingsEnabled && Boolean.TRUE.equals(requestShouldUseConcurrentSearch.get()); + } + + /** + * Evaluate if parsed request supports concurrent segment search + */ + public void evaluateRequestShouldUseConcurrentSearch() { + if (sort != null && sort.isSortOnTimeSeriesField()) { + requestShouldUseConcurrentSearch.set(false); + } else if (aggregations() != null + && aggregations().factories() != null + && !aggregations().factories().allFactoriesSupportConcurrentSearch()) { + requestShouldUseConcurrentSearch.set(false); + } else if (terminateAfter != DEFAULT_TERMINATE_AFTER) { + requestShouldUseConcurrentSearch.set(false); + } else { + requestShouldUseConcurrentSearch.set(true); + } } public void setProfilers(Profilers profilers) { @@ -910,7 +930,7 @@ public ReaderContext readerContext() { @Override public InternalAggregation.ReduceContext partialOnShard() { InternalAggregation.ReduceContext rc = requestToAggReduceContextBuilder.apply(request.source()).forPartialReduction(); - rc.setSliceLevel(isConcurrentSegmentSearchEnabled()); + rc.setSliceLevel(shouldUseConcurrentSearch()); return rc; } @@ -929,7 +949,7 @@ public BucketCollectorProcessor bucketCollectorProcessor() { * @return true: use concurrent search * false: otherwise */ - private boolean useConcurrentSearch(Executor concurrentSearchExecutor) { + private boolean evaluateConcurrentSegmentSearchSettings(Executor concurrentSearchExecutor) { if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH) && (clusterService != null) && (concurrentSearchExecutor != null)) { @@ -943,4 +963,21 @@ private boolean useConcurrentSearch(Executor concurrentSearchExecutor) { return false; } } + + @Override + public boolean shouldUseTimeSeriesDescSortOptimization() { + return indexShard.isTimeSeriesDescSortOptimizationEnabled() + && sort != null + && sort.isSortOnTimeSeriesField() + && sort.sort.getSort()[0].getReverse() == false; + } + + @Override + public int getTargetMaxSliceCount() { + if (shouldUseConcurrentSearch() == false) { + throw new IllegalStateException("Target slice count should not be used when concurrent search is disabled"); + } + return clusterService.getClusterSettings().get(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING); + } + } diff --git a/server/src/main/java/org/opensearch/search/DocValueFormat.java b/server/src/main/java/org/opensearch/search/DocValueFormat.java index febb270ea917c..5439212fb86a0 100644 --- a/server/src/main/java/org/opensearch/search/DocValueFormat.java +++ b/server/src/main/java/org/opensearch/search/DocValueFormat.java @@ -35,6 +35,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.common.Numbers; import org.opensearch.common.joda.Joda; import org.opensearch.common.joda.JodaDateFormatter; @@ -247,7 +248,10 @@ public DateTime(DateFormatter formatter, ZoneId timeZone, DateFieldMapper.Resolu public DateTime(StreamInput in) throws IOException { String datePattern = in.readString(); - + String printPattern = null; + if (in.getVersion().onOrAfter(Version.V_2_12_0)) { + printPattern = in.readOptionalString(); + } String zoneId = in.readString(); if 
(in.getVersion().before(LegacyESVersion.V_7_0_0)) { this.timeZone = DateUtils.of(zoneId); @@ -271,7 +275,7 @@ public DateTime(StreamInput in) throws IOException { */ isJoda = Joda.isJodaPattern(in.getVersion(), datePattern); } - this.formatter = isJoda ? Joda.forPattern(datePattern) : DateFormatter.forPattern(datePattern); + this.formatter = isJoda ? Joda.forPattern(datePattern) : DateFormatter.forPattern(datePattern, printPattern); this.parser = formatter.toDateMathParser(); @@ -284,7 +288,14 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(formatter.pattern()); + if (out.getVersion().before(Version.V_2_12_0) && formatter.equals(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER)) { + out.writeString(DateFieldMapper.LEGACY_DEFAULT_DATE_TIME_FORMATTER.pattern()); // required for backwards compatibility + } else { + out.writeString(formatter.pattern()); + } + if (out.getVersion().onOrAfter(Version.V_2_12_0)) { + out.writeOptionalString(formatter.printPattern()); + } if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID()); } else { diff --git a/server/src/main/java/org/opensearch/search/GenericSearchExtBuilder.java b/server/src/main/java/org/opensearch/search/GenericSearchExtBuilder.java new file mode 100644 index 0000000000000..35e68f78774e3 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/GenericSearchExtBuilder.java @@ -0,0 +1,165 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.search; + +import org.opensearch.core.ParseField; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParseException; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * This is a catch-all SearchExtBuilder implementation that is used when an appropriate SearchExtBuilder + * is not found during SearchResponse's fromXContent operation. 
+ */ +public final class GenericSearchExtBuilder extends SearchExtBuilder { + + public final static ParseField EXT_BUILDER_NAME = new ParseField("generic_ext"); + + private final Object genericObj; + private final ValueType valueType; + + enum ValueType { + SIMPLE(0), + MAP(1), + LIST(2); + + private final int value; + + ValueType(int value) { + this.value = value; + } + + public int getValue() { + return value; + } + + static ValueType fromInt(int value) { + switch (value) { + case 0: + return SIMPLE; + case 1: + return MAP; + case 2: + return LIST; + default: + throw new IllegalArgumentException("Unsupported value: " + value); + } + } + } + + public GenericSearchExtBuilder(Object genericObj, ValueType valueType) { + this.genericObj = genericObj; + this.valueType = valueType; + } + + public GenericSearchExtBuilder(StreamInput in) throws IOException { + valueType = ValueType.fromInt(in.readInt()); + switch (valueType) { + case SIMPLE: + genericObj = in.readGenericValue(); + break; + case MAP: + genericObj = in.readMap(); + break; + case LIST: + genericObj = in.readList(r -> r.readGenericValue()); + break; + default: + throw new IllegalStateException("Unable to construct GenericSearchExtBuilder from incoming stream."); + } + } + + public static GenericSearchExtBuilder fromXContent(XContentParser parser) throws IOException { + // Look at the parser's next token. + // If it's START_OBJECT, parse as map, if it's START_ARRAY, parse as list, else + // parse as simpleVal + XContentParser.Token token = parser.currentToken(); + ValueType valueType; + Object genericObj; + if (token == XContentParser.Token.START_OBJECT) { + genericObj = parser.map(); + valueType = ValueType.MAP; + } else if (token == XContentParser.Token.START_ARRAY) { + genericObj = parser.list(); + valueType = ValueType.LIST; + } else if (token.isValue()) { + genericObj = parser.objectText(); + valueType = ValueType.SIMPLE; + } else { + throw new XContentParseException("Unknown token: " + token); + } + + return new GenericSearchExtBuilder(genericObj, valueType); + } + + @Override + public String getWriteableName() { + return EXT_BUILDER_NAME.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(valueType.getValue()); + switch (valueType) { + case SIMPLE: + out.writeGenericValue(genericObj); + break; + case MAP: + out.writeMap((Map) genericObj); + break; + case LIST: + out.writeCollection((List) genericObj, StreamOutput::writeGenericValue); + break; + default: + throw new IllegalStateException("Unknown valueType: " + valueType); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + switch (valueType) { + case SIMPLE: + return builder.field(EXT_BUILDER_NAME.getPreferredName(), genericObj); + case MAP: + return builder.field(EXT_BUILDER_NAME.getPreferredName(), (Map) genericObj); + case LIST: + return builder.field(EXT_BUILDER_NAME.getPreferredName(), (List) genericObj); + default: + return null; + } + } + + // We need this for the equals method. 
+ Object getValue() { + return genericObj; + } + + @Override + public int hashCode() { + return Objects.hash(this.valueType, this.genericObj); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (!(obj instanceof GenericSearchExtBuilder)) { + return false; + } + return Objects.equals(getValue(), ((GenericSearchExtBuilder) obj).getValue()) + && Objects.equals(valueType, ((GenericSearchExtBuilder) obj).valueType); + } +} diff --git a/server/src/main/java/org/opensearch/search/MultiValueMode.java b/server/src/main/java/org/opensearch/search/MultiValueMode.java index 0fbd41f062710..d812f1290efe3 100644 --- a/server/src/main/java/org/opensearch/search/MultiValueMode.java +++ b/server/src/main/java/org/opensearch/search/MultiValueMode.java @@ -542,7 +542,7 @@ public static MultiValueMode fromString(String sortMode) { * Return a {@link NumericDocValues} instance that can be used to sort documents * with this mode and the provided values. When a document has no value, * missingValue is returned. - * + *
<p>
            * Allowed Modes: SUM, AVG, MEDIAN, MIN, MAX */ public NumericDocValues select(final SortedNumericDocValues values) { @@ -583,12 +583,12 @@ protected long pick(SortedNumericDocValues values) throws IOException { /** * Return a {@link NumericDocValues} instance that can be used to sort root documents * with this mode, the provided values and filters for root/inner documents. - * + *
<p>
            * For every root document, the values of its inner documents will be aggregated. * If none of the inner documents has a value, then missingValue is returned. - * + *
<p>
            * Allowed Modes: SUM, AVG, MIN, MAX - * + *
<p>
            * NOTE: Calling the returned instance on docs that are not root docs is illegal * The returned instance can only be evaluate the current and upcoming docs */ @@ -658,7 +658,7 @@ protected long pick( * Return a {@link NumericDoubleValues} instance that can be used to sort documents * with this mode and the provided values. When a document has no value, * missingValue is returned. - * + *
<p>
            * Allowed Modes: SUM, AVG, MEDIAN, MIN, MAX */ public NumericDoubleValues select(final SortedNumericDoubleValues values) { @@ -694,12 +694,12 @@ protected double pick(SortedNumericDoubleValues values) throws IOException { /** * Return a {@link NumericDoubleValues} instance that can be used to sort root documents * with this mode, the provided values and filters for root/inner documents. - * + *
<p>
            * For every root document, the values of its inner documents will be aggregated. * If none of the inner documents has a value, then missingValue is returned. - * + *
<p>
            * Allowed Modes: SUM, AVG, MIN, MAX - * + *
<p>
            * NOTE: Calling the returned instance on docs that are not root docs is illegal * The returned instance can only be evaluate the current and upcoming docs */ @@ -761,7 +761,7 @@ protected double pick( * Return a {@link BinaryDocValues} instance that can be used to sort documents * with this mode and the provided values. When a document has no value, * missingValue is returned. - * + *
<p>
            * Allowed Modes: MIN, MAX */ public BinaryDocValues select(final SortedBinaryDocValues values, final BytesRef missingValue) { @@ -816,12 +816,12 @@ protected BytesRef pick(SortedBinaryDocValues values) throws IOException { /** * Return a {@link BinaryDocValues} instance that can be used to sort root documents * with this mode, the provided values and filters for root/inner documents. - * + *
<p>
            * For every root document, the values of its inner documents will be aggregated. * If none of the inner documents has a value, then missingValue is returned. - * + *
<p>
            * Allowed Modes: MIN, MAX - * + *
<p>
            * NOTE: Calling the returned instance on docs that are not root docs is illegal * The returned instance can only be evaluate the current and upcoming docs */ @@ -889,7 +889,7 @@ protected BytesRef pick( /** * Return a {@link SortedDocValues} instance that can be used to sort documents * with this mode and the provided values. - * + *
<p>
            * Allowed Modes: MIN, MAX */ public SortedDocValues select(final SortedSetDocValues values) { @@ -949,11 +949,11 @@ protected int pick(SortedSetDocValues values) throws IOException { /** * Return a {@link SortedDocValues} instance that can be used to sort root documents * with this mode, the provided values and filters for root/inner documents. - * + *
<p>
            * For every root document, the values of its inner documents will be aggregated. - * + *
<p>
            * Allowed Modes: MIN, MAX - * + *
<p>
            * NOTE: Calling the returned instance on docs that are not root docs is illegal * The returned instance can only be evaluate the current and upcoming docs */ diff --git a/server/src/main/java/org/opensearch/search/SearchBootstrapSettings.java b/server/src/main/java/org/opensearch/search/SearchBootstrapSettings.java deleted file mode 100644 index 7d167838a77c3..0000000000000 --- a/server/src/main/java/org/opensearch/search/SearchBootstrapSettings.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.search; - -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; - -/** - * Keeps track of all the search related node level settings which can be accessed via static methods - * - * @opensearch.internal - */ -public class SearchBootstrapSettings { - // settings to configure maximum slice created per search request using OS custom slice computation mechanism. Default lucene - // mechanism will not be used if this setting is set with value > 0 - public static final String CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY = "search.concurrent.max_slice_count"; - public static final int CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE = 0; - - // value == 0 means lucene slice computation will be used - // this setting will be updated to dynamic setting as part of https://github.com/opensearch-project/OpenSearch/issues/8870 - public static final Setting CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING = Setting.intSetting( - CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, - CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE, - CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE, - Setting.Property.NodeScope - ); - private static Settings settings; - - public static void initialize(Settings openSearchSettings) { - settings = openSearchSettings; - } - - public static int getTargetMaxSlice() { - return (settings != null) - ? settings.getAsInt( - CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, - CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE - ) - : CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE; - } -} diff --git a/server/src/main/java/org/opensearch/search/SearchExtBuilder.java b/server/src/main/java/org/opensearch/search/SearchExtBuilder.java index 4d86c6c2e2277..1df58666f6fdb 100644 --- a/server/src/main/java/org/opensearch/search/SearchExtBuilder.java +++ b/server/src/main/java/org/opensearch/search/SearchExtBuilder.java @@ -47,10 +47,10 @@ * Any state needs to be serialized as part of the {@link Writeable#writeTo(StreamOutput)} method and * read from the incoming stream, usually done adding a constructor that takes {@link StreamInput} as * an argument. - * + *
<p>
            * Registration happens through {@link SearchPlugin#getSearchExts()}, which also needs a {@link CheckedFunction} that's able to parse * the incoming request from the REST layer into the proper {@link SearchExtBuilder} subclass. - * + *
<p>
            * {@link #getWriteableName()} must return the same name as the one used for the registration * of the {@link SearchExtSpec}. * diff --git a/server/src/main/java/org/opensearch/search/SearchHit.java b/server/src/main/java/org/opensearch/search/SearchHit.java index fab9c1b773d0b..dfc716ed9744d 100644 --- a/server/src/main/java/org/opensearch/search/SearchHit.java +++ b/server/src/main/java/org/opensearch/search/SearchHit.java @@ -1052,7 +1052,7 @@ public int getOffset() { /** * Returns the next child nested level if there is any, otherwise null is returned. - * + *
<p>
            * In the case of mappings with multiple levels of nested object fields */ public NestedIdentity getChild() { diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java index 65b4ba3f94987..cbef8434a73d8 100644 --- a/server/src/main/java/org/opensearch/search/SearchModule.java +++ b/server/src/main/java/org/opensearch/search/SearchModule.java @@ -343,7 +343,7 @@ public class SearchModule { /** * Constructs a new SearchModule object - * + *
<p>
            * NOTE: This constructor should not be called in production unless an accurate {@link Settings} object is provided. * When constructed, a static flag is set in Lucene {@link BooleanQuery#setMaxClauseCount} according to the settings. * @param settings Current settings diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index d43e6a3f4fb2e..b6573b29f8e75 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -147,6 +147,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.ExecutionException; @@ -255,6 +256,20 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv Property.NodeScope ); + // settings to configure maximum slice created per search request using OS custom slice computation mechanism. Default lucene + // mechanism will not be used if this setting is set with value > 0 + public static final String CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY = "search.concurrent.max_slice_count"; + public static final int CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE = 0; + + // value == 0 means lucene slice computation will be used + public static final Setting CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING = Setting.intSetting( + CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, + CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE, + CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE, + Property.Dynamic, + Property.NodeScope + ); + public static final int DEFAULT_SIZE = 10; public static final int DEFAULT_FROM = 0; @@ -1224,6 +1239,7 @@ private void processFailure(ReaderContext context, Exception exc) { private void parseSource(DefaultSearchContext context, SearchSourceBuilder source, boolean includeAggregations) { // nothing to parse... if (source == null) { + context.evaluateRequestShouldUseConcurrentSearch(); return; } SearchShardTarget shardTarget = context.shardTarget(); @@ -1270,9 +1286,6 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc if (source.minScore() != null) { context.minimumScore(source.minScore()); } - if (source.profile()) { - context.setProfilers(new Profilers(context.searcher(), context.isConcurrentSegmentSearchEnabled())); - } if (source.timeout() != null) { context.timeout(source.timeout()); } @@ -1406,6 +1419,10 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc final CollapseContext collapseContext = source.collapse().build(queryShardContext); context.collapse(collapseContext); } + context.evaluateRequestShouldUseConcurrentSearch(); + if (source.profile()) { + context.setProfilers(new Profilers(context.searcher(), context.shouldUseConcurrentSearch())); + } } /** @@ -1543,17 +1560,29 @@ private CanMatchResponse canMatch(ShardSearchRequest request, boolean checkRefre canMatch = aliasFilterCanMatch; } final FieldDoc searchAfterFieldDoc = getSearchAfterFieldDoc(request, context); - canMatch = canMatch && canMatchSearchAfter(searchAfterFieldDoc, minMax, sortBuilder); + final Integer trackTotalHitsUpto = request.source() == null ? 
null : request.source().trackTotalHitsUpTo(); + canMatch = canMatch && canMatchSearchAfter(searchAfterFieldDoc, minMax, sortBuilder, trackTotalHitsUpto); return new CanMatchResponse(canMatch || hasRefreshPending, minMax); } } } - public static boolean canMatchSearchAfter(FieldDoc searchAfter, MinAndMax minMax, FieldSortBuilder primarySortField) { + public static boolean canMatchSearchAfter( + FieldDoc searchAfter, + MinAndMax minMax, + FieldSortBuilder primarySortField, + Integer trackTotalHitsUpto + ) { // Check for sort.missing == null, since in case of missing values sort queries, if segment/shard's min/max // is out of search_after range, it still should be printed and hence we should not skip segment/shard. - if (searchAfter != null && minMax != null && primarySortField != null && primarySortField.missing() == null) { + // Skipping search on shard/segment entirely can cause mismatch on total_tracking_hits, hence skip only if + // track_total_hits is false. + if (searchAfter != null + && minMax != null + && primarySortField != null + && primarySortField.missing() == null + && Objects.equals(trackTotalHitsUpto, SearchContext.TRACK_TOTAL_HITS_DISABLED)) { final Object searchAfterPrimary = searchAfter.fields[0]; if (primarySortField.order() == SortOrder.DESC) { if (minMax.compareMin(searchAfterPrimary) > 0) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java index 392c65ce27aea..47e9def094623 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java @@ -135,7 +135,7 @@ public ScoreMode scoreMode() { * Returns a converter for point values if it's safe to use the indexed data instead of * doc values. Generally, this means that the query has no filters or scripts, the aggregation is * top level, and the underlying field is indexed, and the index is sorted in the right order. - * + *
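The widened `canMatchSearchAfter` above only skips a shard when total-hit tracking is disabled and the primary sort defines no `missing` value, since skipping a shard outright could otherwise change the reported total. With plain longs standing in for `FieldDoc` and `MinAndMax`, the rule is roughly:

```java
// Simplified, assumption-laden sketch of the shard-skipping rule; not the real signature.
final class CanMatchSketch {
    static boolean canMatchSearchAfter(long searchAfter, long shardMin, long shardMax,
                                       boolean descendingSort, boolean trackTotalHitsDisabled) {
        if (trackTotalHitsDisabled == false) {
            return true; // must visit the shard so the total hit count stays exact
        }
        return descendingSort
            ? shardMin <= searchAfter   // DESC: skip only if even the shard's minimum is past the cursor
            : shardMax >= searchAfter;  // ASC: skip only if even the shard's maximum is behind it
    }
}
```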
<p>
            * If those conditions aren't met, return null to indicate a point reader cannot * be used in this case. * diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java index 81fd741e9139c..9b8ebe0b4e5e4 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java @@ -257,6 +257,15 @@ private AggregatorFactories(AggregatorFactory[] factories) { this.factories = factories; } + public boolean allFactoriesSupportConcurrentSearch() { + for (AggregatorFactory factory : factories) { + if (factory.supportsConcurrentSegmentSearch() == false || factory.evaluateChildFactories() == false) { + return false; + } + } + return true; + } + /** * Create all aggregators so that they can be consumed with multiple * buckets. diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java index 05686f35c2166..759d043743978 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java @@ -114,4 +114,15 @@ public AggregatorFactory getParent() { public String getStatsSubtype() { return OTHER_SUBTYPE; } + + /** + * Implementation should override this method and return true if the Aggregator created by the factory works with concurrent segment search execution model + */ + protected boolean supportsConcurrentSegmentSearch() { + return false; + } + + public boolean evaluateChildFactories() { + return factories.allFactoriesSupportConcurrentSearch(); + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java index 12a859e7c33e4..889358d55b153 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java +++ b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java @@ -215,7 +215,7 @@ public double sortValue(AggregationPath.PathElement head, Iterator * This method first reduces the aggregations, and if it is the final reduce, then reduce the pipeline * aggregations (both embedded parent/sibling as well as top-level sibling pipelines) */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java index 67af0b13eed3b..eef427754f535 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java @@ -132,7 +132,7 @@ public final void collectExistingBucket(LeafBucketCollector subCollector, int do /** * This only tidies up doc counts. Call {@link MergingBucketsDeferringCollector#mergeBuckets(long[])} to merge the actual * ordinals and doc ID deltas. - * + *
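The `allFactoriesSupportConcurrentSearch`/`evaluateChildFactories` pair added above recurses over the whole factory tree, so a single non-supporting aggregation anywhere disables concurrent search for the request. A stripped-down model of that recursion (names mirror the patch, structure is simplified):

```java
// Stripped-down model: every factory must opt in, and each factory also vets its children.
abstract class FactorySketch {
    private final FactorySketch[] children;

    FactorySketch(FactorySketch... children) {
        this.children = children;
    }

    // Conservative default, as in AggregatorFactory: each aggregation type opts in explicitly.
    protected boolean supportsConcurrentSegmentSearch() {
        return false;
    }

    boolean evaluateChildFactories() {
        for (FactorySketch child : children) {
            if (child.supportsConcurrentSegmentSearch() == false || child.evaluateChildFactories() == false) {
                return false;
            }
        }
        return true;
    }
}
```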
<p>
            * Refer to that method for documentation about the merge map. * * @deprecated use {@link mergeBuckets(long, LongUnaryOperator)} @@ -146,7 +146,7 @@ public final void mergeBuckets(long[] mergeMap, long newNumBuckets) { * * @param mergeMap a unary operator which maps a bucket's ordinal to the ordinal it should be merged with. * If a bucket's ordinal is mapped to -1 then the bucket is removed entirely. - * + *
<p>
            * This only tidies up doc counts. Call {@link MergingBucketsDeferringCollector#mergeBuckets(LongUnaryOperator)} to * merge the actual ordinals and doc ID deltas. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java index de74055bb94f3..dfb8f6be7155d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/GeoTileUtils.java @@ -101,7 +101,7 @@ private GeoTileUtils() {} /** * Parse an integer precision (zoom level). The {@link ValueType#INT} allows it to be a number or a string. - * + *
<p>
            * The precision is expressed as a zoom level between 0 and {@link #MAX_ZOOM} (inclusive). * * @param parser {@link XContentParser} to parse the value from diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java index 8d35c1edc8cb0..3e9424eda92a9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java @@ -55,16 +55,17 @@ public MergingBucketsDeferringCollector(SearchContext context, boolean isGlobal) /** * Merges/prunes the existing bucket ordinals and docDeltas according to the provided mergeMap. - * + *
<p>
            * The mergeMap is an array where the index position represents the current bucket ordinal, and * the value at that position represents the ordinal the bucket should be merged with. If * the value is set to -1 it is removed entirely. - * + *
<p>
            * For example, if the mergeMap [1,1,3,-1,3] is provided: - * - Buckets `0` and `1` will be merged to bucket ordinal `1` - * - Bucket `2` and `4` will be merged to ordinal `3` - * - Bucket `3` will be removed entirely - * + *
<ul>
+ * <li> Buckets `0` and `1` will be merged to bucket ordinal `1`</li>
+ * <li> Bucket `2` and `4` will be merged to ordinal `3`</li>
+ * <li> Bucket `3` will be removed entirely</li>
+ * </ul>
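Applying that example mergeMap to per-bucket doc counts is a single scatter-add pass; a minimal sketch:

```java
// Minimal illustration: mergeMap[ord] names the surviving ordinal, -1 drops the bucket.
final class MergeMapSketch {
    static long[] merge(long[] mergeMap, long[] docCounts) {
        long[] merged = new long[docCounts.length];
        for (int ord = 0; ord < docCounts.length; ord++) {
            long target = mergeMap[ord];
            if (target != -1) {
                merged[(int) target] += docCounts[ord];
            }
        }
        return merged;
    }
}
// With mergeMap = {1, 1, 3, -1, 3}: counts from buckets 0 and 1 land in ordinal 1,
// buckets 2 and 4 land in ordinal 3, and bucket 3's count is discarded.
```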
            * This process rebuilds the ordinals and docDeltas according to the mergeMap, so it should * not be called unless there are actually changes to be made, to avoid unnecessary work. * @@ -80,7 +81,7 @@ public void mergeBuckets(long[] mergeMap) { * * @param mergeMap a unary operator which maps a bucket's ordinal to the ordinal it should be merged with. * If a bucket's ordinal is mapped to -1 then the bucket is removed entirely. - * + *
<p>
            * This process rebuilds the ordinals and docDeltas according to the mergeMap, so it should * not be called unless there are actually changes to be made, to avoid unnecessary work. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java index fe1270e10c80e..99ffb563ba2a8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java @@ -91,4 +91,8 @@ public Aggregator createInternal( return new AdjacencyMatrixAggregator(name, factories, separator, keys, weights, searchContext, parent, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java index 09691a69c75f4..2ff79fb623def 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java @@ -77,4 +77,9 @@ protected Aggregator createInternal( ) throws IOException { return new CompositeAggregator(name, factories, searchContext, parent, metadata, size, sources, afterKey); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index 567f7758eb9a4..4ec309b819183 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -221,7 +221,7 @@ public DateHistogramValuesSourceBuilder dateHistogramInterval(DateHistogramInter /** * Sets the interval of the DateHistogram using calendar units (`1d`, `1w`, `1M`, etc). These units * are calendar-aware, meaning they respect leap additions, variable days per month, etc. - * + *
<p>
            * This is mutually exclusive with {@link DateHistogramValuesSourceBuilder#fixedInterval(DateHistogramInterval)} * * @param interval The calendar interval to use with the aggregation @@ -234,7 +234,7 @@ public DateHistogramValuesSourceBuilder calendarInterval(DateHistogramInterval i /** * Sets the interval of the DateHistogram using fixed units (`1ms`, `1s`, `10m`, `4h`, etc). These are * not calendar aware and are simply multiples of fixed, SI units. - * + *
<p>
            * This is mutually exclusive with {@link DateHistogramValuesSourceBuilder#calendarInterval(DateHistogramInterval)} * * @param interval The fixed interval to use with the aggregation diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java index 6591c9c054224..fab9d11dd33e2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java @@ -154,10 +154,10 @@ public String getWriteableName() { @Override public InternalComposite create(List newBuckets) { - /** - * This is used by pipeline aggregations to filter/remove buckets so we - * keep the afterKey of the original aggregation in order - * to be able to retrieve the next page even if all buckets have been filtered. + /* + This is used by pipeline aggregations to filter/remove buckets so we + keep the afterKey of the original aggregation in order + to be able to retrieve the next page even if all buckets have been filtered. */ return new InternalComposite( name, @@ -485,8 +485,8 @@ public int compareKey(InternalBucket other) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - /** - * See {@link CompositeAggregation#bucketToXContent} + /* + See {@link CompositeAggregation#bucketToXContent} */ throw new UnsupportedOperationException("not implemented"); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/ParsedComposite.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/ParsedComposite.java index 8382b191025fe..2a1544e218f2c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/ParsedComposite.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/ParsedComposite.java @@ -70,9 +70,9 @@ public static ParsedComposite fromXContent(XContentParser parser, String name) t ParsedComposite aggregation = PARSER.parse(parser, null); aggregation.setName(name); if (aggregation.afterKey == null && aggregation.getBuckets().size() > 0) { - /** - * Previous versions (< 6.3) don't send afterKey - * in the response so we set it as the last returned buckets. + /* + Previous versions (< 6.3) don't send afterKey + in the response so we set it as the last returned buckets. 
*/ aggregation.setAfterKey(aggregation.getBuckets().get(aggregation.getBuckets().size() - 1).key); } @@ -130,8 +130,8 @@ void setKey(Map key) { @Override public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - /** - * See {@link CompositeAggregation#bucketToXContent} + /* + See {@link CompositeAggregation#bucketToXContent} */ throw new UnsupportedOperationException("not implemented"); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java index bd0a4f13ddf08..9442529bf9342 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/SortedDocsProducer.java @@ -62,7 +62,7 @@ abstract class SortedDocsProducer { * Visits all non-deleted documents in iterator and fills the provided queue * with the top composite buckets extracted from the collection. * Documents that contain a top composite bucket are added in the provided builder if it is not null. - * + *
<p>
            * Returns true if the queue is full and the current leadSourceBucket did not produce any competitive * composite buckets. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java index 4ab573cf0a6b6..a0a636c121e12 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FilterAggregatorFactory.java @@ -75,7 +75,7 @@ public FilterAggregatorFactory( * necessary. This is done lazily so that the {@link Weight} is only created * if the aggregation collects documents reducing the overhead of the * aggregation in the case where no documents are collected. - * + *
<p>
            * Note that as aggregations are initialsed and executed in a serial manner, * no concurrency considerations are necessary here. */ @@ -101,4 +101,8 @@ public Aggregator createInternal( return new FilterAggregator(name, () -> this.getWeight(), factories, searchContext, parent, cardinality, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java index 795f81a08d8d5..a8e157a1cbb79 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filter/FiltersAggregatorFactory.java @@ -92,7 +92,7 @@ public FiltersAggregatorFactory( * necessary. This is done lazily so that the {@link Weight}s are only * created if the aggregation collects documents reducing the overhead of * the aggregation in the case where no documents are collected. - * + *
<p>
            * Note: With concurrent segment search use case, multiple aggregation collectors executing * on different threads will try to fetch the weights. To handle the race condition there is * a synchronization block @@ -146,4 +146,8 @@ public Aggregator createInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/global/GlobalAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/global/GlobalAggregatorFactory.java index 419ae9f16d9e6..47de1fcda29c9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/global/GlobalAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/global/GlobalAggregatorFactory.java @@ -82,4 +82,9 @@ public Aggregator createInternal( } return new GlobalAggregator(name, factories, searchContext, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index d7f89225524c0..b4f1e78f77aaf 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -546,7 +546,7 @@ private int collectValue(long owningBucketOrd, int roundingIdx, int doc, long ro /** * Increase the rounding of {@code owningBucketOrd} using - * estimated, bucket counts, {@link #rebucket() rebucketing} the all + * estimated, bucket counts, {@link FromMany#rebucket()} rebucketing} the all * buckets if the estimated number of wasted buckets is too high. */ private int increaseRoundingIfNeeded(long owningBucketOrd, int oldEstimatedBucketCount, long newKey, int oldRounding) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java index 7434ef84ee92f..059b88c9475ed 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java @@ -124,4 +124,9 @@ protected Aggregator createUnmapped(SearchContext searchContext, Aggregator pare metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index 210a605627def..3598b9c494d38 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -250,7 +250,7 @@ public DateHistogramAggregationBuilder dateHistogramInterval(DateHistogramInterv /** * Sets the interval of the DateHistogram using calendar units (`1d`, `1w`, `1M`, etc). 
These units * are calendar-aware, meaning they respect leap additions, variable days per month, etc. - * + *
<p>
            * This is mutually exclusive with {@link DateHistogramAggregationBuilder#fixedInterval(DateHistogramInterval)} * * @param interval The calendar interval to use with the aggregation @@ -263,7 +263,7 @@ public DateHistogramAggregationBuilder calendarInterval(DateHistogramInterval in /** * Sets the interval of the DateHistogram using fixed units (`1ms`, `1s`, `10m`, `4h`, etc). These are * not calendar aware and are simply multiples of fixed, SI units. - * + *
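The calendar/fixed split documented in these builders is easiest to see side by side; a hypothetical usage sketch (the two setters are mutually exclusive, as the javadoc above states):

```java
import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval;

final class IntervalSketch {
    // Calendar-aware: a "month" tracks real month lengths, leap days, and DST transitions.
    static DateHistogramAggregationBuilder byMonth() {
        return new DateHistogramAggregationBuilder("by_month").calendarInterval(DateHistogramInterval.MONTH);
    }

    // Fixed: ninety SI minutes, never calendar-adjusted.
    static DateHistogramAggregationBuilder byNinetyMinutes() {
        return new DateHistogramAggregationBuilder("by_90m").fixedInterval(new DateHistogramInterval("90m"));
    }
}
```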
<p>
            * This is mutually exclusive with {@link DateHistogramAggregationBuilder#calendarInterval(DateHistogramInterval)} * * @param interval The fixed interval to use with the aggregation diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 8233c3d995dda..f602eea7a9b12 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -216,9 +216,7 @@ public void collectDebugInfo(BiConsumer add) { } /** - * Returns the size of the bucket in specified units. - * - * If unitSize is null, returns 1.0 + * @return the size of the bucket in specified units, or 1.0 if unitSize is null */ @Override public double bucketSize(long bucket, Rounding.DateTimeUnit unitSize) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index dd74d83c665de..807ec1ab4e4b7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -148,4 +148,9 @@ protected Aggregator createUnmapped(SearchContext searchContext, Aggregator pare metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramInterval.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramInterval.java index 8f8e71f3cd685..9f907bcacadf9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramInterval.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramInterval.java @@ -129,7 +129,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws * Converts this DateHistogramInterval into a millisecond representation. If this is a calendar * interval, it is an approximation of milliseconds based on the fixed equivalent (e.g. `1h` is treated as 60 * fixed minutes, rather than the hour at a specific point in time. - * + *
<p>
            * This is merely a convenience helper for quick comparisons and should not be used for situations that * require precise durations. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java index a2c63cf25c0c2..1d21152b6f622 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java @@ -34,7 +34,7 @@ /** * A shared interface for aggregations that parse and use "interval" parameters. - * + *
<p>
            * Provides definitions for the new fixed and calendar intervals, and deprecated * defintions for the old interval/dateHisto interval parameters * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java index a7faacbf517aa..4860e3e546acd 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java @@ -54,13 +54,13 @@ /** * A class that handles all the parsing, bwc and deprecations surrounding date histogram intervals. - * + *
<p>
            * - Provides parser helpers for the deprecated interval/dateHistogramInterval parameters. * - Provides parser helpers for the new calendar/fixed interval parameters * - Can read old intervals from a stream and convert to new intervals * - Can write new intervals to old format when streaming out * - Provides a variety of helper methods to interpret the intervals as different types, depending on caller's need - * + *
<p>
            * After the deprecated parameters are removed, this class can be simplified greatly. The legacy options * will be removed, and the mutual-exclusion checks can be done in the setters directly removing the need * for the enum and the complicated "state machine" logic @@ -234,7 +234,7 @@ public DateHistogramInterval getAsCalendarInterval() { /** * Sets the interval of the DateHistogram using calendar units (`1d`, `1w`, `1M`, etc). These units * are calendar-aware, meaning they respect leap additions, variable days per month, etc. - * + *
<p>
            * This is mutually exclusive with {@link DateIntervalWrapper#fixedInterval(DateHistogramInterval)} * * @param interval The fixed interval to use @@ -264,7 +264,7 @@ public DateHistogramInterval getAsFixedInterval() { /** * Sets the interval of the DateHistogram using fixed units (`1ms`, `1s`, `10m`, `4h`, etc). These are * not calendar aware and are simply multiples of fixed, SI units. - * + *
<p>
            * This is mutually exclusive with {@link DateIntervalWrapper#calendarInterval(DateHistogramInterval)} * * @param interval The fixed interval to use diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBounds.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBounds.java index 69c70ed1bf7fd..235ae8fcee6d7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBounds.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DoubleBounds.java @@ -49,7 +49,7 @@ /** * Represent hard_bounds and extended_bounds in histogram aggregations. - * + *
<p>
            * This class is similar to {@link LongBounds} used in date histograms, but is using longs to store data. LongBounds and DoubleBounds are * not used interchangeably and therefore don't share any common interfaces except for serialization. * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java index 321c16cdba970..7506dcde23641 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java @@ -149,4 +149,9 @@ protected Aggregator createUnmapped(SearchContext searchContext, Aggregator pare metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java index 52f689eb7c229..8ebd67bc1ebe5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java @@ -491,7 +491,7 @@ private void mergeBucketsWithPlan(List buckets, List plan, * Makes a merge plan by simulating the merging of the two closest buckets, until the target number of buckets is reached. * Distance is determined by centroid comparison. * Then, this plan is actually executed and the underlying buckets are merged. - * + *
<p>
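Illustration, not part of the patch: the merge criterion just described, sketched on a plain array of centroids. Because the buckets are kept sorted by centroid (see the requirement below), the closest pair is always adjacent:

    // Fuse the closest adjacent centroids until only targetNumBuckets remain.
    // Simplified sketch: the real merge weights buckets by doc count, and
    // assumes targetNumBuckets >= 1 and a non-empty input.
    static double[] mergeClosest(double[] sortedCentroids, int targetNumBuckets) {
        java.util.List<Double> c = new java.util.ArrayList<>();
        for (double v : sortedCentroids) c.add(v);
        while (c.size() > targetNumBuckets) {
            int best = 0; // left index of the closest adjacent pair
            for (int i = 1; i < c.size() - 1; i++) {
                if (c.get(i + 1) - c.get(i) < c.get(best + 1) - c.get(best)) best = i;
            }
            c.set(best, (c.get(best) + c.get(best + 1)) / 2); // unweighted midpoint
            c.remove(best + 1);
        }
        return c.stream().mapToDouble(Double::doubleValue).toArray();
    }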
            * Requires: buckets is sorted by centroid. */ private void mergeBucketsIfNeeded(List buckets, int targetNumBuckets, ReduceContext reduceContext) { @@ -567,7 +567,7 @@ private void mergeBucketsWithSameMin(List buckets, ReduceContext reduceC /** * When two adjacent buckets A, B overlap (A.max > B.min) then their boundary is set to * the midpoint: (A.max + B.min) / 2. - * + *
<p>
            * After this adjustment, A will contain more values than indicated and B will have less. */ private void adjustBoundsForOverlappingBuckets(List buckets, ReduceContext reduceContext) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/LongBounds.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/LongBounds.java index e138824e7fce8..ad6572916c84a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/LongBounds.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/LongBounds.java @@ -54,7 +54,7 @@ /** * Represent hard_bounds and extended_bounds in date-histogram aggregations. - * + *
<p>
            * This class is similar to {@link DoubleBounds} used in histograms, but is using longs to store data. LongBounds and DoubleBounds are * * not used interchangeably and therefore don't share any common interfaces except for serialization. * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java index 11ff7dbc407cb..526945243c786 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java @@ -76,7 +76,7 @@ public class VariableWidthHistogramAggregator extends DeferableBucketAggregator /** * This aggregator goes through multiple phases of collection. Each phase has a different CollectionPhase::collectValue * implementation - * + *
<p>
            * Running a clustering algorithm like K-Means is unfeasible because large indices don't fit into memory. * But having multiple collection phases lets us accurately bucket the docs in one pass. */ @@ -231,7 +231,7 @@ protected void swap(int i, int j) { * Produces a merge map where `mergeMap[i]` represents the index that values[i] * would be moved to if values were sorted * In other words, this method produces a merge map that will sort values - * + *
<p>
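Illustration, not part of the patch: such a merge map is an argsort inverse, sketched as a hypothetical helper:

    // mergeMap[i] = position values[i] would occupy if values were sorted.
    // E.g. values {5.0, 1.0, 3.0} yields mergeMap {2, 0, 1}.
    static long[] sortingMergeMap(double[] values) {
        Integer[] byValue = new Integer[values.length];
        for (int i = 0; i < byValue.length; i++) byValue[i] = i;
        java.util.Arrays.sort(byValue, (a, b) -> Double.compare(values[a], values[b]));
        long[] mergeMap = new long[values.length];
        for (int sortedPos = 0; sortedPos < byValue.length; sortedPos++) {
            mergeMap[byValue[sortedPos]] = sortedPos;
        }
        return mergeMap;
    }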
            * See BucketsAggregator::mergeBuckets to learn more about the merge map */ public long[] generateMergeMap() { @@ -242,10 +242,10 @@ public long[] generateMergeMap() { /** * Sorting the documents by key lets us bucket the documents into groups with a single linear scan - * + *
<p>
            * But we can't do this by just sorting buffer, because we also need to generate a merge map * for every change we make to the list, so that we can apply the changes to the underlying buckets as well. - * + *
<p>
            * By just creating a merge map, we eliminate the need to actually sort buffer. We can just * use the merge map to find any doc's sorted index. */ @@ -347,7 +347,7 @@ private void createAndAppendNewCluster(double value) { /** * Move the last cluster to position idx * This is expensive because a merge map of size numClusters is created, so don't call this method too often - * + *
<p>
            * TODO: Make this more efficient */ private void moveLastCluster(int index) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorFactory.java index d9d9a74eb958f..b846bf72ef4c5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorFactory.java @@ -116,4 +116,9 @@ protected Aggregator createUnmapped(SearchContext searchContext, Aggregator pare metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorFactory.java index cfa2bd3f7097c..3032d695a3ee2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/missing/MissingAggregatorFactory.java @@ -85,4 +85,9 @@ protected MissingAggregator doCreateInternal( .getAggregator(MissingAggregationBuilder.REGISTRY_KEY, config) .build(name, factories, config, searchContext, parent, cardinality, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java index ca1018795b518..a43d41882e475 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregatorFactory.java @@ -100,4 +100,8 @@ public InternalAggregation buildEmptyAggregation() { } } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java index 27cd8a2688836..816f05052b6a2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java @@ -83,6 +83,11 @@ public Aggregator createInternal( } } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + /** * Unmapped class for reverse nested agg * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java index bfd7845e7e16f..41f2768eb7544 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java @@ -122,4 +122,9 @@ protected Aggregator doCreateInternal( metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() 
{ + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java index 0ee440ecc8487..fc4b4273df703 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java @@ -103,4 +103,8 @@ protected Aggregator doCreateInternal( .build(name, factories, config.getValuesSource(), config.format(), ranges, keyed, searchContext, parent, cardinality, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java index d243a89c632d7..dcf6b84164991 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java @@ -72,4 +72,8 @@ public DateRangeAggregatorFactory( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java index 3208d35c6a407..728f43094cf7e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java @@ -172,6 +172,11 @@ protected Aggregator doCreateInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + /** * The source location for the distance calculation * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorFactory.java index 803bceaf57fb5..c58b2e881803c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorFactory.java @@ -73,4 +73,9 @@ public RangeAggregatorFactory( metadata ); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java index 41ef823a375c0..5f81c76b69385 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java @@ -159,4 +159,9 @@ public InternalAggregation buildEmptyAggregation() { } }; } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregator.java 
b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregator.java index 23e2bc6b3e54b..a886bdb3ae188 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregator.java @@ -55,7 +55,7 @@ /** * Aggregate on only the top-scoring docs on a shard. - * + *
<p>
            * TODO currently the diversity feature of this agg offers only 'script' and * 'field' as a means of generating a de-dup value. In future it would be nice * if users could use any of the "bucket" aggs syntax (geo, date histogram...) diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java index fa98c799352a6..d3db8a66ee21f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/sampler/SamplerAggregatorFactory.java @@ -73,4 +73,8 @@ public Aggregator createInternal( return new SamplerAggregator(name, shardSize, factories, searchContext, parent, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/BytesKeyedBucketOrds.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/BytesKeyedBucketOrds.java index 0eb23013d1e47..5d7c5c2976169 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/BytesKeyedBucketOrds.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/BytesKeyedBucketOrds.java @@ -131,7 +131,7 @@ private static class FromSingle extends BytesKeyedBucketOrds { private final BytesRefHash ords; private FromSingle(BigArrays bigArrays) { - ords = new BytesRefHash(1, bigArrays); + ords = new BytesRefHash(bigArrays); } @Override @@ -190,7 +190,7 @@ private static class FromMany extends BytesKeyedBucketOrds { private final LongKeyedBucketOrds longToBucketOrds; private FromMany(BigArrays bigArrays) { - bytesToLong = new BytesRefHash(1, bigArrays); + bytesToLong = new BytesRefHash(bigArrays); longToBucketOrds = LongKeyedBucketOrds.build(bigArrays, CardinalityUpperBound.MANY); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java index 4d1cbd4ce72f1..5b90163fa3959 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalMultiTerms.java @@ -407,8 +407,8 @@ public int hashCode() { /** * Copy from InternalComposite - * - * Format obj using the provided {@link DocValueFormat}. + *
<p>
            + * Format {@code obj} using the provided {@link DocValueFormat}. * If the format is equals to {@link DocValueFormat#RAW}, the object is returned as is * for numbers and a string for {@link BytesRef}s. */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java index 3510b0079057b..02d9c31d90141 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/InternalTerms.java @@ -443,11 +443,11 @@ public InternalAggregation reduce(List aggregations, Reduce } final List reducedBuckets; - /** - * Buckets returned by a partial reduce or a shard response are sorted by key since {@link LegacyESVersion#V_7_10_0}. - * That allows to perform a merge sort when reducing multiple aggregations together. - * For backward compatibility, we disable the merge sort and use ({@link InternalTerms#reduceLegacy} if any of - * the provided aggregations use a different {@link InternalTerms#reduceOrder}. + /* + Buckets returned by a partial reduce or a shard response are sorted by key since {@link LegacyESVersion#V_7_10_0}. + That allows to perform a merge sort when reducing multiple aggregations together. + For backward compatibility, we disable the merge sort and use ({@link InternalTerms#reduceLegacy} if any of + the provided aggregations use a different {@link InternalTerms#reduceOrder}. */ BucketOrder thisReduceOrder = getReduceOrder(aggregations); if (isKeyOrder(thisReduceOrder)) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java index aa6da630aa9f3..7134999e4aa85 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregationFactory.java @@ -157,6 +157,11 @@ protected Aggregator createInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + /** * Supplier for internal values source * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java index 0482ef823818c..59f48bd7fbaba 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/MultiTermsAggregator.java @@ -94,10 +94,10 @@ public MultiTermsAggregator( this.partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this); // Todo, copy from TermsAggregator. need to remove duplicate code. if (subAggsNeedScore() && descendsFromNestedAggregator(parent)) { - /** - * Force the execution to depth_first because we need to access the score of - * nested documents in a sub-aggregation and we are not able to generate this score - * while replaying deferred documents. + /* + Force the execution to depth_first because we need to access the score of + nested documents in a sub-aggregation and we are not able to generate this score + while replaying deferred documents. 
*/ this.collectMode = SubAggCollectionMode.DEPTH_FIRST; } else { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java index dc616ca7512be..5d83d926ab36f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java @@ -196,7 +196,7 @@ public double getPrecision() { * Set's the false-positive rate for individual cuckoo filters. Does not dictate the overall fpp rate * since we use a "scaling" cuckoo filter which adds more filters as required, and the overall * error rate grows differently than individual filters - * + *
<p>
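Illustration, not part of the patch: a usage sketch; the setter name is assumed to mirror the getPrecision accessor in this hunk, and the field name is illustrative. The space trade-off is noted just below.

    // Hypothetical usage: trade filter size for a lower per-filter false-positive rate.
    RareTermsAggregationBuilder rare = new RareTermsAggregationBuilder("rare_products");
    rare.field("product_id");
    rare.setPrecision(0.001); // coarser values (e.g. the 0.01 default) give smaller filters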
            * This value does, however, affect the overall space usage of the filter. Coarser precisions provide * more compact filters. The default is 0.01 */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java index 93b8eca370d46..b5f3abe89ac59 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java @@ -237,6 +237,11 @@ protected Aggregator doCreateInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + /** * Execution mode for rare terms agg * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificanceLookup.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificanceLookup.java index aee4caa67afa1..34bbac55900a8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificanceLookup.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificanceLookup.java @@ -123,7 +123,7 @@ public void close() {} }; } return new BackgroundFrequencyForBytes() { - private final BytesRefHash termToPosition = new BytesRefHash(1, bigArrays); + private final BytesRefHash termToPosition = new BytesRefHash(bigArrays); private LongArray positionToFreq = bigArrays.newLongArray(1, false); @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java index 54fb746b97ebb..f6802a58dfed2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTermsAggregatorFactory.java @@ -306,6 +306,11 @@ protected Aggregator doCreateInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + /** * The execution mode for the significant terms agg * diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java index 8acc69083dea4..81366c212c86c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java @@ -312,4 +312,9 @@ public void close() { Releasables.close(dupSequenceSpotters); } } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java index c796faa6a8b76..cc35fe75e5e92 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/StringRareTermsAggregator.java @@ -135,7 +135,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I 
Arrays.fill(mergeMap, -1); long offset = 0; for (int owningOrdIdx = 0; owningOrdIdx < owningBucketOrds.length; owningOrdIdx++) { - try (BytesRefHash bucketsInThisOwningBucketToCollect = new BytesRefHash(1, context.bigArrays())) { + try (BytesRefHash bucketsInThisOwningBucketToCollect = new BytesRefHash(context.bigArrays())) { filters[owningOrdIdx] = newFilter(); List builtBuckets = new ArrayList<>(); BytesKeyedBucketOrds.BucketOrdsEnum collectedBuckets = bucketOrds.ordsEnum(owningBucketOrds[owningOrdIdx]); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java index 7cacf1e918380..845149d894aad 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregator.java @@ -244,10 +244,10 @@ public TermsAggregator( partiallyBuiltBucketComparator = order == null ? null : order.partiallyBuiltBucketComparator(b -> b.bucketOrd, this); this.format = format; if (subAggsNeedScore() && descendsFromNestedAggregator(parent)) { - /** - * Force the execution to depth_first because we need to access the score of - * nested documents in a sub-aggregation and we are not able to generate this score - * while replaying deferred documents. + /* + Force the execution to depth_first because we need to access the score of + nested documents in a sub-aggregation and we are not able to generate this score + while replaying deferred documents. */ this.collectMode = SubAggCollectionMode.DEPTH_FIRST; } else { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 62844b4499dba..a4d73bfd3e634 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -558,4 +558,8 @@ public String toString() { } } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLog.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLog.java index 6b998fc86361d..902e4d69ed5fa 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLog.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLog.java @@ -34,9 +34,9 @@ /** * Hyperloglog counter, implemented based on pseudo code from - * http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf and its appendix - * https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen - * + * 40671.pdf and its + * appendix + *
<p>
            * Trying to understand what this class does without having read the paper is considered adventurous. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java index 4354a23b70f6b..e74179b403e8e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractHyperLogLogPlusPlus.java @@ -46,7 +46,6 @@ /** * Base class for HLL++ algorithms. - * * It contains methods for cloning and serializing the data structure. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractLinearCounting.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractLinearCounting.java index 3f5f524c9c2f5..7c00b25ae365f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractLinearCounting.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractLinearCounting.java @@ -36,11 +36,11 @@ /** * Linear counter, implemented based on pseudo code from - * http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf and its appendix - * https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen - * + * 40671.pdf and its + * appendix + *
<p>
            * Trying to understand what this class does without having read the paper is considered adventurous. - * + *
<p>
            * The algorithm just keep a record of all distinct values provided encoded as an integer. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java index a143b6a69da58..32d3cfeec2eaf 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java @@ -74,7 +74,7 @@ public static > ConstructingO ParseField valuesField ) { - /** + /* * This is a non-ideal ConstructingObjectParser, because it is a compromise between Percentiles and Ranks. * Ranks requires an array of values because there is no sane default, and we want to keep that in the ctor. * Percentiles has defaults, which means the API allows the user to either use the default or configure @@ -87,6 +87,7 @@ public static > ConstructingO * out the behavior from there * * `args` are provided from the ConstructingObjectParser in-order they are defined in the parser. So: + * * - args[0]: values * - args[1]: tdigest config options * - args[2]: hdr config options @@ -221,7 +222,7 @@ public boolean keyed() { /** * Expert: set the number of significant digits in the values. Only relevant * when using {@link PercentilesMethod#HDR}. - * + *
<p>
            * Deprecated: set numberOfSignificantValueDigits by configuring a {@link PercentilesConfig.Hdr} instead * and set via {@link PercentilesAggregationBuilder#percentilesConfig(PercentilesConfig)} */ @@ -241,7 +242,7 @@ public T numberOfSignificantValueDigits(int numberOfSignificantValueDigits) { /** * Expert: get the number of significant digits in the values. Only relevant * when using {@link PercentilesMethod#HDR}. - * + *
<p>
            * Deprecated: get numberOfSignificantValueDigits by inspecting the {@link PercentilesConfig} returned from * {@link PercentilesAggregationBuilder#percentilesConfig()} instead */ @@ -256,7 +257,7 @@ public int numberOfSignificantValueDigits() { /** * Expert: set the compression. Higher values improve accuracy but also * memory usage. Only relevant when using {@link PercentilesMethod#TDIGEST}. - * + *
<p>
            * Deprecated: set compression by configuring a {@link PercentilesConfig.TDigest} instead * and set via {@link PercentilesAggregationBuilder#percentilesConfig(PercentilesConfig)} */ @@ -273,7 +274,7 @@ public T compression(double compression) { /** * Expert: get the compression. Higher values improve accuracy but also * memory usage. Only relevant when using {@link PercentilesMethod#TDIGEST}. - * + *
<p>
            * Deprecated: get compression by inspecting the {@link PercentilesConfig} returned from * {@link PercentilesAggregationBuilder#percentilesConfig()} instead */ @@ -341,15 +342,15 @@ public T percentilesConfig(PercentilesConfig percentilesConfig) { /** * Return the current algo configuration, or a default (Tdigest) otherwise - * + *
<p>
            * This is needed because builders don't have a "build" or "finalize" method, but * the old API did bake in defaults. Certain operations like xcontent, equals, hashcode * will use the values in the builder at any time and need to be aware of defaults. - * + *
<p>
            * But to maintain BWC behavior as much as possible, we allow the user to set * algo settings independent of method. To keep life simple we use a null to track * if any method has been selected yet. - * + *
<p>
            * However, this means we need a way to fetch the default if the user hasn't * selected any method and uses a builder-side feature like xcontent */ diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java index 75419b7c64b12..0a09fae1eaebe 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java @@ -90,4 +90,9 @@ protected Aggregator doCreateInternal( .getAggregator(AvgAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java index 47084436d3d4f..980667b45324e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/CardinalityAggregatorFactory.java @@ -89,6 +89,11 @@ protected Aggregator doCreateInternal( .build(name, config, precision(), searchContext, parent, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + private int precision() { return precisionThreshold == null ? HyperLogLogPlusPlus.DEFAULT_PRECISION diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java index 20203b22b2459..99b3d09517a1f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java @@ -94,4 +94,9 @@ protected Aggregator doCreateInternal( .getAggregator(ExtendedStatsAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, sigma, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java index 1d450eeae98d8..a3fc91c6b62fb 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java @@ -81,6 +81,11 @@ protected Aggregator doCreateInternal( .build(name, config, searchContext, parent, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register(GeoCentroidAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.GEOPOINT, GeoCentroidAggregator::new, true); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java index 7bd1e1aa22a90..7ab35eaed785c 100644 --- 
a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java @@ -49,16 +49,16 @@ * Hyperloglog++ counter, implemented based on pseudo code from * http://static.googleusercontent.com/media/research.google.com/fr//pubs/archive/40671.pdf and its appendix * https://docs.google.com/document/d/1gyjfMHy43U9OWBXxfaeG-3MjGzejW1dlpyMwEYAAWEI/view?fullscreen - * + *
<p>
            * This implementation is different from the original implementation in that it uses a hash table instead of a sorted list for linear * counting. Although this requires more space and makes hyperloglog (which is less accurate) used sooner, this is also considerably faster. - * + *
<p>
            * Trying to understand what this class does without having read the paper is considered adventurous. - * + *
<p>
            * The HyperLogLogPlusPlus contains two algorithms, one for linear counting and the HyperLogLog algorithm. Initially hashes added to the * data structure are processed using the linear counting until a threshold defined by the precision is reached where the data is replayed * to the HyperLogLog algorithm and then this is used. - * + *
<p>
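Illustration, not part of the patch: a sketch of the two-phase behavior just described, with a plain HashSet standing in for the real linear-counting table:

    // Count exactly while small; replay into HLL registers once past the threshold.
    class TwoPhaseCardinality {
        private final int threshold; // derived from the precision in the real class
        private java.util.HashSet<Long> exact = new java.util.HashSet<>();
        private long[] hllRegisters; // null while still in the linear-counting phase

        TwoPhaseCardinality(int threshold) { this.threshold = threshold; }

        void collect(long hash) {
            if (hllRegisters != null) {
                addToHll(hash);
            } else {
                exact.add(hash);
                if (exact.size() > threshold) {
                    hllRegisters = new long[1 << 14]; // register count fixed by precision
                    for (long h : exact) addToHll(h); // replay the collected hashes
                    exact = null;
                }
            }
        }

        private void addToHll(long hash) { /* register update elided in this sketch */ }
    }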
            * It supports storing several HyperLogLogPlusPlus structures which are identified by a bucket number. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java index 252df7358ddcd..558e9df93c804 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlusSparse.java @@ -40,7 +40,7 @@ /** * AbstractHyperLogLogPlusPlus instance that only supports linear counting. The maximum number of hashes supported * by the structure is determined at construction time. - * + *
<p>
            * This structure expects all the added values to be distinct and therefore there are no checks * if an element has been previously added. * diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java index 96f1af94f2d07..4fe936c8b7797 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java @@ -90,4 +90,9 @@ protected Aggregator doCreateInternal( .getAggregator(MaxAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java index 9776595d5a76d..3ef3c2afc7875 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java @@ -95,4 +95,9 @@ protected Aggregator doCreateInternal( .getAggregator(MedianAbsoluteDeviationAggregationBuilder.REGISTRY_KEY, config) .build(name, config.getValuesSource(), config.format(), searchContext, parent, metadata, compression); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java index b117f70c81baf..58fbe5edefd12 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java @@ -90,4 +90,9 @@ protected Aggregator doCreateInternal( .getAggregator(MinAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentileRanksAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentileRanksAggregatorFactory.java index 19352d30a5177..d3c18bcad1a43 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentileRanksAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentileRanksAggregatorFactory.java @@ -111,4 +111,9 @@ protected Aggregator doCreateInternal( .getAggregator(PercentileRanksAggregationBuilder.REGISTRY_KEY, config) .build(name, config.getValuesSource(), searchContext, parent, percents, percentilesConfig, keyed, config.format(), metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentilesAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentilesAggregatorFactory.java index e249863e25313..148e26e038923 100644 --- 
a/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentilesAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/PercentilesAggregatorFactory.java @@ -103,4 +103,9 @@ protected Aggregator doCreateInternal( .getAggregator(PercentilesAggregationBuilder.REGISTRY_KEY, config) .build(name, config.getValuesSource(), searchContext, parent, percents, percentilesConfig, keyed, config.format(), metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java index 5c831d60f75a8..58ef54ed64482 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java @@ -124,6 +124,11 @@ public Aggregator createInternal( ); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } + private static Script deepCopyScript(Script script, SearchContext context, Map aggParams) { if (script != null) { Map params = mergeParams(aggParams, deepCopyParams(script.getParams(), context)); diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregatorFactory.java index 0c10df174efa0..0e96e631044dd 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/StatsAggregatorFactory.java @@ -90,4 +90,9 @@ protected Aggregator doCreateInternal( .getAggregator(StatsAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java index b3506ff958833..ef9b93920ba18 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java @@ -90,4 +90,9 @@ protected Aggregator doCreateInternal( .getAggregator(SumAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorFactory.java index e312983cd6d24..ba371327c6893 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/TopHitsAggregatorFactory.java @@ -155,4 +155,8 @@ public Aggregator createInternal( return new TopHitsAggregator(searchContext.fetchPhase(), subSearchContext, name, searchContext, parent, metadata); } + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git 
a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java index be98df384fc28..6f9be06231819 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java @@ -52,7 +52,7 @@ /** * A field data based aggregator that counts the number of values a specific field has within the aggregation context. - * + *
<p>
            * This aggregator works in a multi-bucket mode, that is, when serves as a sub-aggregator, a single aggregator instance aggregates the * counts for all buckets owned by the parent aggregator) * diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorFactory.java index feed42e911856..4a04dd2e0a932 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorFactory.java @@ -84,4 +84,9 @@ protected Aggregator doCreateInternal( .getAggregator(ValueCountAggregationBuilder.REGISTRY_KEY, config) .build(name, config, searchContext, parent, metadata); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java index 9a27e9801d5fe..111245cae99e5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java @@ -95,4 +95,9 @@ protected Aggregator doCreateInternal( public String getStatsSubtype() { return configs.get(VALUE_FIELD.getPreferredName()).valueSourceType().typeName(); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketHelpers.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketHelpers.java index 3015e7ce9f364..c7f2a29793bff 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketHelpers.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketHelpers.java @@ -64,7 +64,7 @@ public class BucketHelpers { * a date_histogram might have empty buckets due to no data existing for that time interval. * This can cause problems for operations like a derivative, which relies on a continuous * function. - * + *
<p>
            * "insert_zeros": empty buckets will be filled with zeros for all metrics * "skip": empty buckets will simply be ignored * diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java index aaeddbfde3eb0..a7e1555d38d80 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java @@ -56,18 +56,18 @@ /** * This pipeline aggregation gives the user the ability to script functions that "move" across a window * of data, instead of single data points. It is the scripted version of MovingAvg pipeline agg. - * + *
<p>
            * Through custom script contexts, we expose a number of convenience methods: - * - * - max - * - min - * - sum - * - unweightedAvg - * - linearWeightedAvg - * - ewma - * - holt - * - holtWintersMovAvg - * + * <p>
+ * <ul>
+ * <li>max</li>
+ * <li>min</li>
+ * <li>sum</li>
+ * <li>unweightedAvg</li>
+ * <li>linearWeightedAvg</li>
+ * <li>ewma</li>
+ * <li>holt</li>
+ * <li>holtWintersMovAvg</li>
+ * </ul>
+ * <p>
            * The user can also define any arbitrary logic via their own scripting, or combine with the above methods. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovingFunctions.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovingFunctions.java index bac486576f537..051b9c43f63f5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovingFunctions.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovingFunctions.java @@ -75,7 +75,7 @@ public static double sum(double[] values) { /** * Calculate a simple unweighted (arithmetic) moving average. - * + *
<p>
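Illustration, not part of the patch: a sketch of the contract spelled out just below (only finite values contribute; an all-missing window yields NaN):

    static double unweightedAvg(double[] values) {
        double sum = 0;
        long count = 0;
        for (double v : values) {
            if (Double.isFinite(v)) { // NaN (missing data) never poisons the average
                sum += v;
                count++;
            }
        }
        return count == 0 ? Double.NaN : sum / count;
    }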
            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN. * The average is based on the count of non-null, non-NaN values. @@ -94,7 +94,7 @@ public static double unweightedAvg(double[] values) { /** * Calculate a standard deviation over the values using the provided average. - * + *
<p>
            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN. * The average is based on the count of non-null, non-NaN values. @@ -118,7 +118,7 @@ public static double stdDev(double[] values, double avg) { /** * Calculate a linearly weighted moving average, such that older values are * linearly less important. "Time" is determined by position in collection - * + *
<p>
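Illustration, not part of the patch: a sketch of position-based linear weighting, so the newest value (highest index) carries the largest weight; the real implementation's handling of skipped gaps may differ.

    static double linearWeightedAvg(double[] values) {
        double weightedSum = 0;
        double totalWeight = 0;
        for (int i = 0; i < values.length; i++) {
            if (Double.isFinite(values[i])) {
                double weight = i + 1; // "time" is just the position in the window
                weightedSum += values[i] * weight;
                totalWeight += weight;
            }
        }
        return totalWeight == 0 ? Double.NaN : weightedSum / totalWeight;
    }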
            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN * The average is based on the count of non-null, non-NaN values. @@ -141,11 +141,11 @@ public static double linearWeightedAvg(double[] values) { /** * * Calculate a exponentially weighted moving average. - * + *
<p>
            * Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values * (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g. * the series mean). Useful values are somewhere in between. Defaults to 0.5. - * + *
<p>
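Illustration, not part of the patch: the standard recurrence behind that description of alpha, as a sketch.

    // alpha = 1: only the newest value matters; alpha = 0: the first value never updates.
    static double ewma(double[] values, double alpha) {
        double avg = Double.NaN;
        for (double v : values) {
            if (Double.isFinite(v) == false) continue; // skip gaps, per the contract below
            avg = Double.isNaN(avg) ? v : alpha * v + (1 - alpha) * avg;
        }
        return avg;
    }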
            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN * The average is based on the count of non-null, non-NaN values. @@ -171,13 +171,13 @@ public static double ewma(double[] values, double alpha) { /** * Calculate a doubly exponential weighted moving average - * + *
<p>
            * Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values * (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g. * the series mean). Useful values are somewhere in between. Defaults to 0.5. - * + *
<p>
            * Beta is equivalent to alpha, but controls the smoothing of the trend instead of the data - * + *
<p>
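Illustration, not part of the patch: the usual double-exponential (Holt) recurrences, with alpha smoothing the level and beta the trend (gap handling omitted; assumes at least two values):

    static double holt(double[] values, double alpha, double beta) {
        double level = values[0];
        double trend = values[1] - values[0]; // seed the trend from the first step
        for (int i = 1; i < values.length; i++) {
            double lastLevel = level;
            level = alpha * values[i] + (1 - alpha) * (level + trend);
            trend = beta * (level - lastLevel) + (1 - beta) * trend;
        }
        return level + trend; // one-step-ahead forecast
    }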
            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN * The average is based on the count of non-null, non-NaN values. @@ -241,14 +241,14 @@ public static double[] holtForecast(double[] values, double alpha, double beta, /** * Calculate a triple exponential weighted moving average - * + *
<p>
            * Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values * (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g. * the series mean). Useful values are somewhere in between. Defaults to 0.5. - * + *
<p>
            * Beta is equivalent to alpha, but controls the smoothing of the trend instead of the data. * Gamma is equivalent to alpha, but controls the smoothing of the seasonality instead of the data - * + *
<p>
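Illustration, not part of the patch: the extra recurrence gamma introduces, sketched in multiplicative form with an assumed known season length m, layered on Holt's level/trend smoothing.

    // Seasonal index update: gamma blends the newly observed ratio into the stored index.
    static void updateSeason(double[] season, int i, int m, double value, double level, double gamma) {
        season[i % m] = gamma * (value / level) + (1 - gamma) * season[i % m];
    }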
            * Only finite values are averaged. NaN or null are ignored. * If all values are missing/null/NaN, the return value will be NaN * The average is based on the count of non-null, non-NaN values. diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java index a61a866228161..8427346357b0e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/SimulatedAnealingMinimizer.java @@ -36,11 +36,11 @@ /** * A cost minimizer which will fit a MovAvgModel to the data. - * + *
<p>
            * This optimizer uses naive simulated annealing. Random solutions in the problem space * are generated, compared against the last period of data, and the least absolute deviation * is recorded as a cost. - * + *
<p>
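Illustration, not part of the patch: a sketch of the temperature-dependent acceptance rule described in the next sentence (Metropolis-style, matching the acceptanceProbability signature visible in this hunk):

    static boolean accept(double oldCost, double newCost, double temp, java.util.Random rng) {
        if (newCost < oldCost) {
            return true; // improvements are always kept
        }
        // Worse solutions survive with a probability that shrinks as temp cools.
        return rng.nextDouble() < Math.exp((oldCost - newCost) / temp);
    }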
            * If the new cost is better than the old cost, the new coefficients are chosen. If the new * solution is worse, there is a temperature-dependent probability it will be randomly selected * anyway. This allows the algo to sample the problem space widely. As iterations progress, @@ -114,7 +114,7 @@ private static double acceptanceProbability(double oldCost, double newCost, doub /** * Calculates the "cost" of a model. E.g. when run on the training data, how closely do the predictions * match the test data - * + *

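The temperature-dependent acceptance rule described above is the classic Metropolis criterion. A short sketch of it (the actual acceptanceProbability helper in this class may differ in shape):

    // Simulated-annealing acceptance sketch: better solutions are always
    // taken; worse ones are taken with probability exp(-delta / temperature),
    // which shrinks as the temperature cools, narrowing the search.
    static boolean accept(double oldCost, double newCost, double temperature, java.util.Random random) {
        if (newCost < oldCost) {
            return true; // strictly better: always accept
        }
        return random.nextDouble() < Math.exp((oldCost - newCost) / temperature);
    }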
            * Uses Least Absolute Differences to calculate error. Note that this is not scale free, but seems * to work fairly well in practice * diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java index 25cdd76183602..a7de47bed2e6e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/AggregationInspectionHelper.java @@ -78,9 +78,9 @@ * Provides a set of static helpers to determine if a particular type of InternalAggregation "has a value" * or not. This can be difficult to determine from an external perspective because each agg uses * different internal bookkeeping to determine if it is empty or not (NaN, +/-Inf, 0.0, etc). - * + *

* This set of helpers aims to ease that task by codifying what "empty" is for each agg. - * + *

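As an illustration of what that codification looks like, two hypothetical checks (the real helpers take concrete InternalAggregation subclasses; these names are invented):

    // Each agg needs its own "empty" rule because each uses a different
    // sentinel: max starts at -Infinity, avg is undefined at zero docs, etc.
    static boolean maxHasValue(double max) {
        return max != Double.NEGATIVE_INFINITY; // never updated => empty
    }

    static boolean avgHasValue(long count) {
        return count > 0; // an average over zero documents has no value
    }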
            * It is not entirely accurate for all aggs, since some do not expose or track the needed state * (e.g. sum doesn't record count, so it's not clear if the sum is 0.0 because it is empty * or because of summing to zero). Pipeline aggs in particular are not well supported diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/CoreValuesSourceType.java b/server/src/main/java/org/opensearch/search/aggregations/support/CoreValuesSourceType.java index 224f9281705e1..f6d6fe28a56d3 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/CoreValuesSourceType.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/CoreValuesSourceType.java @@ -274,7 +274,7 @@ public ValuesSource replaceMissing(ValuesSource valuesSource, Object rawMissing, * MappedFieldType, it prefers to get the formatter from there. Only when a field can't be * resolved (which is to say script cases and unmapped field cases), it will fall back to calling this method on whatever * ValuesSourceType it was able to resolve to. - * + *

* For geo_shape fields we may never hit this function until we have aggregations that are geo_shape * specific and not present on geo_point, since we use the default CoreValuesSourceType for geo-based aggregations, * which is GEOPOINT @@ -411,7 +411,7 @@ public ValuesSource replaceMissing(ValuesSource valuesSource, Object rawMissing, @Override public DocValueFormat getFormatter(String format, ZoneId tz) { return new DocValueFormat.DateTime( - format == null ? DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER : DateFormatter.forPattern(format), + format == null ? DateFieldMapper.getDefaultDateTimeFormatter() : DateFormatter.forPattern(format), tz == null ? ZoneOffset.UTC : tz, // If we were just looking at fields, we could read the resolution from the field settings, but we need to deal with script // output, which has no way to indicate the resolution, so we need to default to something. Milliseconds is the standard. diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java index e3f914ca259f6..c866238d12fcb 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java @@ -51,7 +51,7 @@ /** * Similar to {@link ValuesSourceAggregationBuilder}, except it references multiple ValuesSources (e.g. so that an aggregation * can pull values from multiple fields). - * + *

* A limitation of this class is that all the ValuesSources being referenced must be of the same type. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValueType.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValueType.java index 59fa2e03f0bc3..33fefa57d50f0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValueType.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValueType.java @@ -61,7 +61,7 @@ public enum ValueType implements Writeable { "date", "date", CoreValuesSourceType.DATE, - new DocValueFormat.DateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, ZoneOffset.UTC, DateFieldMapper.Resolution.MILLISECONDS) + new DocValueFormat.DateTime(DateFieldMapper.getDefaultDateTimeFormatter(), ZoneOffset.UTC, DateFieldMapper.Resolution.MILLISECONDS) ), IP((byte) 6, "ip", "ip", CoreValuesSourceType.IP, DocValueFormat.IP), // TODO: what is the difference between "number" and "numeric"? diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index 68448d93052b8..2b1b06a3693d5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -275,7 +275,7 @@ protected final void doWriteTo(StreamOutput out) throws IOException { /** * DO NOT OVERRIDE THIS! - * + *

            * This method only exists for legacy support. No new aggregations need this, nor should they override it. * * @param version For backwards compatibility, subclasses can change behavior based on the version diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceType.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceType.java index 86102e63297d1..9158e9f59cab2 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceType.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceType.java @@ -43,14 +43,14 @@ * {@link ValuesSourceType} represents a collection of fields that share a common set of operations, for example all numeric fields. * Aggregations declare their support for a given ValuesSourceType (via {@link ValuesSourceRegistry.Builder#register}), * and should then not need to care about the fields which use that ValuesSourceType. - * + *

            * ValuesSourceTypes provide a set of methods to instantiate concrete {@link ValuesSource} instances, based on the actual source of the * data for the aggregations. In general, aggregations should not call these methods, but rather rely on {@link ValuesSourceConfig} to have * selected the correct implementation. - * + *

            * ValuesSourceTypes should be stateless. We recommend that plugins define an enum for their ValuesSourceTypes, even if the plugin only * intends to define one ValuesSourceType. ValuesSourceTypes are not serialized as part of the aggregations framework. - * + *

            * Prefer reusing an existing ValuesSourceType (ideally from {@link CoreValuesSourceType}) over creating a new type. There are some cases * where creating a new type is necessary however. In particular, consider a new ValuesSourceType if the field has custom encoding/decoding * requirements; if the field needs to expose additional information to the aggregation (e.g. {@link ValuesSource.Range#rangeType()}); or diff --git a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java index 4f6c2c327509d..fac1ac319087e 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java +++ b/server/src/main/java/org/opensearch/search/backpressure/SearchBackpressureService.java @@ -255,8 +255,8 @@ boolean isNodeInDuress() { return isNodeInDuress; } - /** - * Returns true if the increase in heap usage is due to search requests. + /* + Returns true if the increase in heap usage is due to search requests. */ /** diff --git a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java index d20e3e50d419f..4c28d96d8289e 100644 --- a/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java +++ b/server/src/main/java/org/opensearch/search/backpressure/settings/SearchBackpressureSettings.java @@ -54,7 +54,7 @@ private static class Defaults { /** * Defines the percentage of tasks to cancel relative to the number of successful task completions. * In other words, it is the number of tokens added to the bucket on each successful task completion. - * + *

* The setting below is deprecated. * To keep backwards compatibility, the old usage is retained, and it's also used as the fallback for the new usage. */ @@ -71,7 +71,7 @@ private static class Defaults { /** * Defines the number of tasks to cancel per unit time (in millis). * In other words, it is the number of tokens added to the bucket each millisecond. - * + *

* The setting below is deprecated. * To keep backwards compatibility, the old usage is retained, and it's also used as the fallback for the new usage. */ @@ -86,7 +86,7 @@ private static class Defaults { /** * Defines the maximum number of tasks that can be cancelled before being rate-limited. - * + *

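Taken together, these three settings describe a token bucket: tokens accrue per successful completion and per elapsed millisecond, capped by the burst size, and each cancellation spends one token. A minimal sketch of that rate limiter (illustrative; not the class used by the backpressure service):

    // Token-bucket sketch matching the settings above. add(...) is called with
    // the per-completion ratio or per-millisecond rate; tryCancel() spends a
    // token, and returns false when the bucket is empty (rate-limited).
    class CancellationTokenBucket {
        private final double burstSize;
        private double tokens;

        CancellationTokenBucket(double burstSize) {
            this.burstSize = burstSize;
            this.tokens = burstSize;
        }

        synchronized void add(double earned) {
            tokens = Math.min(burstSize, tokens + earned);
        }

        synchronized boolean tryCancel() {
            if (tokens >= 1.0) {
                tokens -= 1.0;
                return true;
            }
            return false;
        }
    }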
* The setting below is deprecated. * To keep backwards compatibility, the old usage is retained, and it's also used as the fallback for the new usage. */ diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchSubPhase.java b/server/src/main/java/org/opensearch/search/fetch/FetchSubPhase.java index fa30b2a5c7450..f3a1d5cafe755 100644 --- a/server/src/main/java/org/opensearch/search/fetch/FetchSubPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/FetchSubPhase.java @@ -88,7 +88,7 @@ public int docId() { /** * This lookup provides access to the source for the given hit document. Note * that it should always be set to the correct doc ID and {@link LeafReaderContext}. - * + *

            * In most cases, the hit document's source is loaded eagerly at the start of the * {@link FetchPhase}. This lookup will contain the preloaded source. */ @@ -103,7 +103,7 @@ public IndexReader topLevelReader() { /** * Returns a {@link FetchSubPhaseProcessor} for this sub phase. - * + *

            * If nothing should be executed for the provided {@code FetchContext}, then the * implementation should return {@code null} */ diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesPhase.java b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesPhase.java index a8ab8c0dcb8a8..9b17d9dbcd8de 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchDocValuesPhase.java @@ -47,7 +47,7 @@ /** * Fetch sub phase which pulls data from doc values. - * + *

            * Specifying {@code "docvalue_fields": ["field1", "field2"]} * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java b/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java index f50524244b115..cc941bb240b91 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/InnerHitsContext.java @@ -138,7 +138,7 @@ public SearchContext parentSearchContext() { /** * The _id of the root document. - * + *

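For orientation, a request that exercises the FetchDocValuesPhase sub phase above can be built from the Java API roughly as follows (a sketch using public SearchSourceBuilder methods, unrelated to the changes in this diff):

    import org.opensearch.index.query.QueryBuilders;
    import org.opensearch.search.builder.SearchSourceBuilder;

    // Ask for field1/field2 to be fetched from doc values; the fetch sub
    // phase then resolves these per hit.
    SearchSourceBuilder source = new SearchSourceBuilder()
        .query(QueryBuilders.matchAllQuery())
        .docValueField("field1")
        .docValueField("field2");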
            * Since this ID is available on the context, inner hits can avoid re-loading the root _id. */ public String getId() { @@ -151,7 +151,7 @@ public void setId(String id) { /** * A source lookup for the root document. - * + *

            * This shared lookup allows inner hits to avoid re-loading the root _source. */ public SourceLookup getRootLookup() { diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/CustomQueryScorer.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/CustomQueryScorer.java index d0fb0f6da53c4..89c77b3cd403f 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/CustomQueryScorer.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/CustomQueryScorer.java @@ -33,6 +33,7 @@ package org.opensearch.search.fetch.subphase.highlight; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.highlight.QueryScorer; import org.apache.lucene.search.highlight.WeightedSpanTerm; @@ -104,6 +105,8 @@ protected void extract(Query query, float boost, Map t super.extract(((FunctionScoreQuery) query).getSubQuery(), boost, terms); } else if (query instanceof OpenSearchToParentBlockJoinQuery) { super.extract(((OpenSearchToParentBlockJoinQuery) query).getChildQuery(), boost, terms); + } else if (query instanceof IndexOrDocValuesQuery) { + super.extract(((IndexOrDocValuesQuery) query).getIndexQuery(), boost, terms); } else { super.extract(query, boost, terms); } diff --git a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java index a4e9e290e7094..7e2e7de2643f6 100644 --- a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java @@ -64,11 +64,9 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.CombinedBitSet; import org.apache.lucene.util.SparseFixedBitSet; -import org.opensearch.cluster.metadata.DataStream; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.search.DocValueFormat; -import org.opensearch.search.SearchBootstrapSettings; import org.opensearch.search.SearchService; import org.opensearch.search.dfs.AggregatedDfs; import org.opensearch.search.profile.ContextualProfileBreakdown; @@ -269,10 +267,11 @@ public void search( @Override protected void search(List leaves, Weight weight, Collector collector) throws IOException { - if (shouldReverseLeafReaderContexts()) { - // reverse the segment search order if this flag is true. - // Certain queries can benefit if we reverse the segment read order, - // for example time series based queries if searched for desc sort order. + // Time series based workload by default traverses segments in desc order i.e. latest to the oldest order. + // This is actually beneficial for search queries to start search on latest segments first for time series workload. + // That can slow down ASC order queries on timestamp workload. So to avoid that slowdown, we will reverse leaf + // reader order here. + if (searchContext.shouldUseTimeSeriesDescSortOptimization()) { for (int i = leaves.size() - 1; i >= 0; i--) { searchLeaf(leaves.get(i), weight, collector); } @@ -286,7 +285,7 @@ protected void search(List leaves, Weight weight, Collector c /** * Lower-level search API. - * + *

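The traversal decision above boils down to choosing an iteration order over per-segment contexts. A self-contained sketch of just that decision (a generic type stands in for Lucene's LeafReaderContext):

    import java.util.List;
    import java.util.function.Consumer;

    // Time-series indexes traverse segments latest-to-oldest by default, which
    // penalizes ascending timestamp sorts; when the desc-sort optimization
    // applies, visit leaves back-to-front (oldest first) instead.
    final class LeafOrder {
        static <T> void visit(List<T> leaves, boolean reverseForTimeSeries, Consumer<T> searchLeaf) {
            if (reverseForTimeSeries) {
                for (int i = leaves.size() - 1; i >= 0; i--) {
                    searchLeaf.accept(leaves.get(i));
                }
            } else {
                for (T leaf : leaves) {
                    searchLeaf.accept(leaf);
                }
            }
        }
    }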
            * {@link LeafCollector#collect(int)} is called for every matching document in * the provided ctx. */ @@ -300,6 +299,9 @@ private void searchLeaf(LeafReaderContext ctx, Weight weight, Collector collecto final LeafCollector leafCollector; try { cancellable.checkCancelled(); + if (weight instanceof ProfileWeight) { + ((ProfileWeight) weight).associateCollectorToLeaves(ctx, collector); + } weight = wrapWeight(weight); // See please https://github.com/apache/lucene/pull/964 collector.setWeight(weight); @@ -451,9 +453,7 @@ public CollectionStatistics collectionStatistics(String field) throws IOExceptio */ @Override protected LeafSlice[] slices(List leaves) { - // For now using the static setting to get the targetMaxSlice value. It will be updated to dynamic mechanism as part of - // https://github.com/opensearch-project/OpenSearch/issues/8870 when lucene changes are available - return slicesInternal(leaves, SearchBootstrapSettings.getTargetMaxSlice()); + return slicesInternal(leaves, searchContext.getTargetMaxSliceCount()); } public DirectoryReader getDirectoryReader() { @@ -510,32 +510,17 @@ private boolean canMatchSearchAfter(LeafReaderContext ctx) throws IOException { ctx, primarySortField ); - return SearchService.canMatchSearchAfter(searchContext.searchAfter(), minMax, primarySortField); + return SearchService.canMatchSearchAfter( + searchContext.searchAfter(), + minMax, + primarySortField, + searchContext.trackTotalHitsUpTo() + ); } } return true; } - private boolean shouldReverseLeafReaderContexts() { - // Time series based workload by default traverses segments in desc order i.e. latest to the oldest order. - // This is actually beneficial for search queries to start search on latest segments first for time series workload. - // That can slow down ASC order queries on timestamp workload. So to avoid that slowdown, we will reverse leaf - // reader order here. 
- if (searchContext.indexShard().isTimeSeriesDescSortOptimizationEnabled()) { - // Only reverse order for asc order sort queries - if (searchContext.sort() != null - && searchContext.sort().sort != null - && searchContext.sort().sort.getSort() != null - && searchContext.sort().sort.getSort().length > 0 - && searchContext.sort().sort.getSort()[0].getReverse() == false - && searchContext.sort().sort.getSort()[0].getField() != null - && searchContext.sort().sort.getSort()[0].getField().equals(DataStream.TIMESERIES_FIELDNAME)) { - return true; - } - } - return false; - } - // package-private for testing LeafSlice[] slicesInternal(List leaves, int targetMaxSlice) { LeafSlice[] leafSlices; diff --git a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java index 02e6568369e16..327552cbfccdb 100644 --- a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java @@ -561,7 +561,17 @@ public BucketCollectorProcessor bucketCollectorProcessor() { } @Override - public boolean isConcurrentSegmentSearchEnabled() { - return in.isConcurrentSegmentSearchEnabled(); + public boolean shouldUseConcurrentSearch() { + return in.shouldUseConcurrentSearch(); + } + + @Override + public boolean shouldUseTimeSeriesDescSortOptimization() { + return in.shouldUseTimeSeriesDescSortOptimization(); + } + + @Override + public int getTargetMaxSliceCount() { + return in.getTargetMaxSliceCount(); } } diff --git a/server/src/main/java/org/opensearch/search/internal/InternalSearchResponse.java b/server/src/main/java/org/opensearch/search/internal/InternalSearchResponse.java index 1561d18f3040a..3af8fc3854cf1 100644 --- a/server/src/main/java/org/opensearch/search/internal/InternalSearchResponse.java +++ b/server/src/main/java/org/opensearch/search/internal/InternalSearchResponse.java @@ -32,17 +32,21 @@ package org.opensearch.search.internal; +import org.opensearch.Version; import org.opensearch.action.search.SearchResponseSections; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.search.SearchExtBuilder; import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.profile.SearchProfileShardResults; import org.opensearch.search.suggest.Suggest; import java.io.IOException; +import java.util.Collections; +import java.util.List; /** * {@link SearchResponseSections} subclass that can be serialized over the wire. 
@@ -67,7 +71,20 @@ public InternalSearchResponse( Boolean terminatedEarly, int numReducePhases ) { - super(hits, aggregations, suggest, timedOut, terminatedEarly, profileResults, numReducePhases); + this(hits, aggregations, suggest, profileResults, timedOut, terminatedEarly, numReducePhases, Collections.emptyList()); + } + + public InternalSearchResponse( + SearchHits hits, + InternalAggregations aggregations, + Suggest suggest, + SearchProfileShardResults profileResults, + boolean timedOut, + Boolean terminatedEarly, + int numReducePhases, + List searchExtBuilderList + ) { + super(hits, aggregations, suggest, timedOut, terminatedEarly, profileResults, numReducePhases, searchExtBuilderList); } public InternalSearchResponse(StreamInput in) throws IOException { @@ -78,7 +95,8 @@ public InternalSearchResponse(StreamInput in) throws IOException { in.readBoolean(), in.readOptionalBoolean(), in.readOptionalWriteable(SearchProfileShardResults::new), - in.readVInt() + in.readVInt(), + readSearchExtBuildersOnOrAfter(in) ); } @@ -91,5 +109,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalBoolean(terminatedEarly); out.writeOptionalWriteable(profileResults); out.writeVInt(numReducePhases); + writeSearchExtBuildersOnOrAfter(out, searchExtBuilders); + } + + private static List readSearchExtBuildersOnOrAfter(StreamInput in) throws IOException { + return (in.getVersion().onOrAfter(Version.V_2_10_0)) ? in.readNamedWriteableList(SearchExtBuilder.class) : Collections.emptyList(); + } + + private static void writeSearchExtBuildersOnOrAfter(StreamOutput out, List searchExtBuilders) throws IOException { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { + out.writeNamedWriteableList(searchExtBuilders); + } } } diff --git a/server/src/main/java/org/opensearch/search/internal/MaxTargetSliceSupplier.java b/server/src/main/java/org/opensearch/search/internal/MaxTargetSliceSupplier.java index 4b20ae6e771ea..64984585f3ab6 100644 --- a/server/src/main/java/org/opensearch/search/internal/MaxTargetSliceSupplier.java +++ b/server/src/main/java/org/opensearch/search/internal/MaxTargetSliceSupplier.java @@ -40,7 +40,7 @@ static IndexSearcher.LeafSlice[] getSlices(List leaves, int t // Sort by maxDoc, descending: sortedLeaves.sort(Collections.reverseOrder(Comparator.comparingInt(l -> l.reader().maxDoc()))); - final List> groupedLeaves = new ArrayList<>(); + final List> groupedLeaves = new ArrayList<>(targetSliceCount); for (int i = 0; i < targetSliceCount; ++i) { groupedLeaves.add(new ArrayList<>()); } diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index 4c239d7d83484..e1b527b057a6c 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -286,7 +286,7 @@ public final void assignRescoreDocIds(RescoreDocIds rescoreDocIds) { /** * Indicates if the current index should perform frequent low level search cancellation check. - * + *

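The readSearchExtBuildersOnOrAfter / writeSearchExtBuildersOnOrAfter pair added above follows the standard wire-compatibility idiom: gate a newly added field on the stream version so that older nodes neither receive nor expect it. The shape of the pattern, reduced to a fragment (field and class names here are illustrative):

    // Write side: only emit the new field to nodes that understand it.
    if (out.getVersion().onOrAfter(Version.V_2_10_0)) {
        out.writeNamedWriteableList(searchExtBuilders); // added in 2.10
    }

    // Read side: fall back to a default when the sender predates the field.
    this.searchExtBuilders = in.getVersion().onOrAfter(Version.V_2_10_0)
        ? in.readNamedWriteableList(SearchExtBuilder.class)
        : Collections.emptyList();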
            * Enabling low-level checks will make long running searches to react to the cancellation request faster. However, * since it will produce more cancellation checks it might slow the search performance down. */ @@ -399,7 +399,7 @@ public final void assignRescoreDocIds(RescoreDocIds rescoreDocIds) { /** * Returns concurrent segment search status for the search context */ - public boolean isConcurrentSegmentSearchEnabled() { + public boolean shouldUseConcurrentSearch() { return false; } @@ -407,7 +407,7 @@ public boolean isConcurrentSegmentSearchEnabled() { * Returns local bucket count thresholds based on concurrent segment search status */ public LocalBucketCountThresholds asLocalBucketCountThresholds(TermsAggregator.BucketCountThresholds bucketCountThresholds) { - if (isConcurrentSegmentSearchEnabled()) { + if (shouldUseConcurrentSearch()) { return new LocalBucketCountThresholds(0, ArrayUtil.MAX_ARRAY_LENGTH - 1); } else { return new LocalBucketCountThresholds(bucketCountThresholds.getShardMinDocCount(), bucketCountThresholds.getShardSize()); @@ -485,4 +485,8 @@ public String toString() { public abstract void setBucketCollectorProcessor(BucketCollectorProcessor bucketCollectorProcessor); public abstract BucketCollectorProcessor bucketCollectorProcessor(); + + public abstract boolean shouldUseTimeSeriesDescSortOptimization(); + + public abstract int getTargetMaxSliceCount(); } diff --git a/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java b/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java index 812030acb9561..734f62d048a5a 100644 --- a/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java @@ -180,7 +180,7 @@ public List extractRawValues(String path) { /** * For the provided path, return its value in the source. - * + *

            * Note that in contrast with {@link SourceLookup#extractRawValues}, array and object values * can be returned. * diff --git a/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java b/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java index d4292b85b20a5..8bab961423f91 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java +++ b/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java @@ -16,11 +16,13 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.search.SearchPhaseResult; +import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.concurrent.TimeUnit; @@ -117,92 +119,138 @@ protected void afterResponseProcessor(Processor processor, long timeInNanos) {} protected void onResponseProcessorFailed(Processor processor) {} - SearchRequest transformRequest(SearchRequest request) throws SearchPipelineProcessingException { - if (searchRequestProcessors.isEmpty() == false) { - long pipelineStart = relativeTimeSupplier.getAsLong(); - beforeTransformRequest(); - try { - try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { - request.writeTo(bytesStreamOutput); - try (StreamInput in = bytesStreamOutput.bytes().streamInput()) { - try (StreamInput input = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry)) { - request = new SearchRequest(input); - } - } - } - for (SearchRequestProcessor processor : searchRequestProcessors) { - beforeRequestProcessor(processor); - long start = relativeTimeSupplier.getAsLong(); - try { - request = processor.processRequest(request); - } catch (Exception e) { - onRequestProcessorFailed(processor); - if (processor.isIgnoreFailure()) { - logger.warn( - "The exception from request processor [" - + processor.getType() - + "] in the search pipeline [" - + id - + "] was ignored", - e - ); - } else { - throw e; - } - } finally { - long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); - afterRequestProcessor(processor, took); - } + void transformRequest(SearchRequest request, ActionListener requestListener) throws SearchPipelineProcessingException { + if (searchRequestProcessors.isEmpty()) { + requestListener.onResponse(request); + return; + } + + try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { + request.writeTo(bytesStreamOutput); + try (StreamInput in = bytesStreamOutput.bytes().streamInput()) { + try (StreamInput input = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry)) { + request = new SearchRequest(input); } - } catch (Exception e) { - onTransformRequestFailure(); - throw new SearchPipelineProcessingException(e); - } finally { - long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); - afterTransformRequest(took); } + } catch (IOException e) { + requestListener.onFailure(new SearchPipelineProcessingException(e)); + return; } - return request; + + ActionListener finalListener = getTerminalSearchRequestActionListener(requestListener); + + // Chain listeners back-to-front + ActionListener currentListener = finalListener; + for (int i = searchRequestProcessors.size() - 1; 
i >= 0; i--) { + final ActionListener nextListener = currentListener; + SearchRequestProcessor processor = searchRequestProcessors.get(i); + currentListener = ActionListener.wrap(r -> { + long start = relativeTimeSupplier.getAsLong(); + beforeRequestProcessor(processor); + processor.processRequestAsync(r, ActionListener.wrap(rr -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); + afterRequestProcessor(processor, took); + nextListener.onResponse(rr); + }, e -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); + afterRequestProcessor(processor, took); + onRequestProcessorFailed(processor); + if (processor.isIgnoreFailure()) { + logger.warn( + "The exception from request processor [" + + processor.getType() + + "] in the search pipeline [" + + id + + "] was ignored", + e + ); + nextListener.onResponse(r); + } else { + nextListener.onFailure(new SearchPipelineProcessingException(e)); + } + })); + }, finalListener::onFailure); + } + + beforeTransformRequest(); + currentListener.onResponse(request); } - SearchResponse transformResponse(SearchRequest request, SearchResponse response) throws SearchPipelineProcessingException { - if (searchResponseProcessors.isEmpty() == false) { - long pipelineStart = relativeTimeSupplier.getAsLong(); - beforeTransformResponse(); - try { - for (SearchResponseProcessor processor : searchResponseProcessors) { - beforeResponseProcessor(processor); - long start = relativeTimeSupplier.getAsLong(); - try { - response = processor.processResponse(request, response); - } catch (Exception e) { - onResponseProcessorFailed(processor); - if (processor.isIgnoreFailure()) { - logger.warn( - "The exception from response processor [" - + processor.getType() - + "] in the search pipeline [" - + id - + "] was ignored", - e - ); - } else { - throw e; - } - } finally { - long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); - afterResponseProcessor(processor, took); + private ActionListener getTerminalSearchRequestActionListener(ActionListener requestListener) { + final long pipelineStart = relativeTimeSupplier.getAsLong(); + + return ActionListener.wrap(r -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); + afterTransformRequest(took); + requestListener.onResponse(new PipelinedRequest(this, r)); + }, e -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); + afterTransformRequest(took); + onTransformRequestFailure(); + requestListener.onFailure(new SearchPipelineProcessingException(e)); + }); + } + + ActionListener transformResponseListener(SearchRequest request, ActionListener responseListener) { + if (searchResponseProcessors.isEmpty()) { + // No response transformation necessary + return responseListener; + } + + long[] pipelineStart = new long[1]; + + final ActionListener originalListener = responseListener; + responseListener = ActionListener.wrap(r -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart[0]); + afterTransformResponse(took); + originalListener.onResponse(r); + }, e -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart[0]); + afterTransformResponse(took); + onTransformResponseFailure(); + originalListener.onFailure(e); + }); + ActionListener finalListener = responseListener; // Jump directly to this one on exception. 
+ + for (int i = searchResponseProcessors.size() - 1; i >= 0; i--) { + final ActionListener currentFinalListener = responseListener; + final SearchResponseProcessor processor = searchResponseProcessors.get(i); + + responseListener = ActionListener.wrap(r -> { + beforeResponseProcessor(processor); + final long start = relativeTimeSupplier.getAsLong(); + processor.processResponseAsync(request, r, ActionListener.wrap(rr -> { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); + afterResponseProcessor(processor, took); + currentFinalListener.onResponse(rr); + }, e -> { + onResponseProcessorFailed(processor); + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); + afterResponseProcessor(processor, took); + if (processor.isIgnoreFailure()) { + logger.warn( + "The exception from response processor [" + + processor.getType() + + "] in the search pipeline [" + + id + + "] was ignored", + e + ); + // Pass the previous response through to the next processor in the chain + currentFinalListener.onResponse(r); + } else { + currentFinalListener.onFailure(new SearchPipelineProcessingException(e)); } - } - } catch (Exception e) { - onTransformResponseFailure(); - throw new SearchPipelineProcessingException(e); - } finally { - long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); - afterTransformResponse(took); - } + })); + }, finalListener::onFailure); } - return response; + final ActionListener chainListener = responseListener; + return ActionListener.wrap(r -> { + beforeTransformResponse(); + pipelineStart[0] = relativeTimeSupplier.getAsLong(); + chainListener.onResponse(r); + }, originalListener::onFailure); + } void runSearchPhaseResultsTransformer( diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java b/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java index 9e5f2fa1592a4..bc82814e88bbd 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java +++ b/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java @@ -31,7 +31,7 @@ /** * TODO: Copied verbatim from {@link org.opensearch.ingest.PipelineConfiguration}. - * + *

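The rewritten transformRequest above builds its processor chain by wrapping listeners from the last processor back to the first, so invoking the outermost listener executes the processors in declared order. The inversion is easier to see stripped of the metrics and error handling (Step is a hypothetical stand-in for a processor):

    import java.util.List;
    import java.util.function.BiConsumer;
    import java.util.function.Consumer;

    // Back-to-front listener chaining: wrap from the tail so that the head of
    // the chain runs step 0 first and the terminal callback runs last.
    final class Chain {
        interface Step<T> extends BiConsumer<T, Consumer<T>> {}

        static <T> void run(List<Step<T>> steps, T input, Consumer<T> done) {
            Consumer<T> next = done; // innermost: deliver the final result
            for (int i = steps.size() - 1; i >= 0; i--) {
                final Step<T> step = steps.get(i);
                final Consumer<T> downstream = next;
                next = value -> step.accept(value, downstream);
            }
            next.accept(input); // kick off the first step
        }
    }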
            * See if we can refactor into a common class. I suspect not, just because this one will hold */ public class PipelineConfiguration extends AbstractDiffable implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java b/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java index 5a7539808c127..77dfc6bcd4fc5 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java +++ b/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java @@ -12,6 +12,7 @@ import org.opensearch.action.search.SearchPhaseResults; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; +import org.opensearch.core.action.ActionListener; import org.opensearch.search.SearchPhaseResult; /** @@ -27,8 +28,12 @@ public final class PipelinedRequest extends SearchRequest { this.pipeline = pipeline; } - public SearchResponse transformResponse(SearchResponse response) { - return pipeline.transformResponse(this, response); + public void transformRequest(ActionListener requestListener) { + pipeline.transformRequest(this, requestListener); + } + + public ActionListener transformResponseListener(ActionListener responseListener) { + return pipeline.transformResponseListener(this, responseListener); } public void transformSearchPhaseResults( diff --git a/server/src/main/java/org/opensearch/search/pipeline/Processor.java b/server/src/main/java/org/opensearch/search/pipeline/Processor.java index fb33f46acada4..0120d68ceb5aa 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/Processor.java +++ b/server/src/main/java/org/opensearch/search/pipeline/Processor.java @@ -15,7 +15,7 @@ * Whether changes are made and what exactly is modified is up to the implementation. *

            * Processors may get called concurrently and thus need to be thread-safe. - * + *

            * TODO: Refactor {@link org.opensearch.ingest.Processor} to extend this interface, and specialize to IngestProcessor. * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/pipeline/ProcessorInfo.java b/server/src/main/java/org/opensearch/search/pipeline/ProcessorInfo.java index 0206b9b6cf716..0b80cdbef6669 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/ProcessorInfo.java +++ b/server/src/main/java/org/opensearch/search/pipeline/ProcessorInfo.java @@ -18,7 +18,7 @@ /** * Information about a search pipeline processor - * + *

            * TODO: This is copy/pasted from the ingest ProcessorInfo. * Can/should we share implementation or is this just boilerplate? * diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java index 739101519ff98..580fe1b7c4216 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java @@ -408,8 +408,7 @@ public PipelinedRequest resolvePipeline(SearchRequest searchRequest) { pipeline = pipelineHolder.pipeline; } } - SearchRequest transformedRequest = pipeline.transformRequest(searchRequest); - return new PipelinedRequest(pipeline, transformedRequest); + return new PipelinedRequest(pipeline, searchRequest); } Map> getRequestProcessorFactories() { diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchRequestProcessor.java b/server/src/main/java/org/opensearch/search/pipeline/SearchRequestProcessor.java index c236cde1a5cc0..427c9e4ab694c 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchRequestProcessor.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchRequestProcessor.java @@ -9,10 +9,37 @@ package org.opensearch.search.pipeline; import org.opensearch.action.search.SearchRequest; +import org.opensearch.core.action.ActionListener; /** * Interface for a search pipeline processor that modifies a search request. */ public interface SearchRequestProcessor extends Processor { + + /** + * Transform a {@link SearchRequest}. Executed on the coordinator node before any {@link org.opensearch.action.search.SearchPhase} + * executes. + *

            + * Implement this method if the processor makes no asynchronous calls. + * @param request the executed {@link SearchRequest} + * @return a new {@link SearchRequest} (or the input {@link SearchRequest} if no changes) + * @throws Exception if an error occurs during processing + */ SearchRequest processRequest(SearchRequest request) throws Exception; + + /** + * Transform a {@link SearchRequest}. Executed on the coordinator node before any {@link org.opensearch.action.search.SearchPhase} + * executes. + *

            + * Expert method: Implement this if the processor needs to make asynchronous calls. Otherwise, implement processRequest. + * @param request the executed {@link SearchRequest} + * @param requestListener callback to be invoked on successful processing or on failure + */ + default void processRequestAsync(SearchRequest request, ActionListener requestListener) { + try { + requestListener.onResponse(processRequest(request)); + } catch (Exception e) { + requestListener.onFailure(e); + } + } } diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchResponseProcessor.java b/server/src/main/java/org/opensearch/search/pipeline/SearchResponseProcessor.java index 2f22cedb9b5c0..21136ce208fee 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchResponseProcessor.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchResponseProcessor.java @@ -10,10 +10,37 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; +import org.opensearch.core.action.ActionListener; /** * Interface for a search pipeline processor that modifies a search response. */ public interface SearchResponseProcessor extends Processor { + + /** + * Transform a {@link SearchResponse}, possibly based on the executed {@link SearchRequest}. + *

            + * Implement this method if the processor makes no asynchronous calls. + * @param request the executed {@link SearchRequest} + * @param response the current {@link SearchResponse}, possibly modified by earlier processors + * @return a modified {@link SearchResponse} (or the input {@link SearchResponse} if no changes) + * @throws Exception if an error occurs during processing + */ SearchResponse processResponse(SearchRequest request, SearchResponse response) throws Exception; + + /** + * Transform a {@link SearchResponse}, possibly based on the executed {@link SearchRequest}. + *

            + * Expert method: Implement this if the processor needs to make asynchronous calls. Otherwise, implement processResponse. + * @param request the executed {@link SearchRequest} + * @param response the current {@link SearchResponse}, possibly modified by earlier processors + * @param responseListener callback to be invoked on successful processing or on failure + */ + default void processResponseAsync(SearchRequest request, SearchResponse response, ActionListener responseListener) { + try { + responseListener.onResponse(processResponse(request, response)); + } catch (Exception e) { + responseListener.onFailure(e); + } + } } diff --git a/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java b/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java index 4d0949624ebed..904b04b249b1b 100644 --- a/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java +++ b/server/src/main/java/org/opensearch/search/profile/AbstractInternalProfileTree.java @@ -70,7 +70,7 @@ public AbstractInternalProfileTree() { * Returns a {@link QueryProfileBreakdown} for a scoring query. Scoring queries (e.g. those * that are past the rewrite phase and are now being wrapped by createWeight() ) follow * a recursive progression. We can track the dependency tree by a simple stack - * + *
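Given the default async adapters above, a processor that makes no remote calls only implements the synchronous method and inherits the listener plumbing for free. A hypothetical example (metadata methods such as getType are part of the Processor interface and omitted here):

    // Synchronous request processor: overrides processRequest only; the
    // default processRequestAsync wraps it and routes the result or any
    // thrown exception into the listener.
    public class DefaultSizeProcessor implements SearchRequestProcessor {
        @Override
        public SearchRequest processRequest(SearchRequest request) {
            if (request.source() != null && request.source().size() == -1) {
                request.source().size(25); // apply a default page size
            }
            return request;
        }
        // getType(), getTag(), getDescription(), isIgnoreFailure() omitted
    }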

            * The only hiccup is that the first scoring query will be identical to the last rewritten * query, so we need to take special care to fix that * @@ -109,7 +109,7 @@ public PB getProfileBreakdown(E query) { /** * Helper method to add a new node to the dependency tree. - * + *

            * Initializes a new list in the dependency tree, saves the query and * generates a new {@link AbstractProfileBreakdown} to track the timings * of this element. @@ -180,6 +180,10 @@ private ProfileResult doGetTree(int token) { // calculating the same times over and over...but worth the effort? String type = getTypeFromElement(element); String description = getDescriptionFromElement(element); + return createProfileResult(type, description, breakdown, childrenProfileResults); + } + + protected ProfileResult createProfileResult(String type, String description, PB breakdown, List childrenProfileResults) { return new ProfileResult( type, description, diff --git a/server/src/main/java/org/opensearch/search/profile/AbstractProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/AbstractProfileBreakdown.java index 67ab062c0e3ca..4a1563e7cdce9 100644 --- a/server/src/main/java/org/opensearch/search/profile/AbstractProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/AbstractProfileBreakdown.java @@ -80,6 +80,7 @@ public Map toBreakdownMap() { for (T timingType : this.timingTypes) { map.put(timingType.toString(), this.timings[timingType.ordinal()].getApproximateTiming()); map.put(timingType + TIMING_TYPE_COUNT_SUFFIX, this.timings[timingType.ordinal()].getCount()); + map.put(timingType + TIMING_TYPE_START_TIME_SUFFIX, this.timings[timingType.ordinal()].getEarliestTimerStartTime()); } return Collections.unmodifiableMap(map); } @@ -87,11 +88,11 @@ public Map toBreakdownMap() { /** * Fetch extra debugging information. */ - protected Map toDebugMap() { + public Map toDebugMap() { return emptyMap(); } - public final long toNodeTime() { + public long toNodeTime() { long total = 0; for (T timingType : timingTypes) { total += timings[timingType.ordinal()].getApproximateTiming(); diff --git a/server/src/main/java/org/opensearch/search/profile/ContextualProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/ContextualProfileBreakdown.java index 4f071f730cd45..3fe621321c8ad 100644 --- a/server/src/main/java/org/opensearch/search/profile/ContextualProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/ContextualProfileBreakdown.java @@ -8,6 +8,12 @@ package org.opensearch.search.profile; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; + +import java.util.List; +import java.util.Map; + /** * Provide contextual profile breakdowns which are associated with freestyle context. Used when concurrent * search over segments is activated and each collector needs own non-shareable profile breakdown instance. 
@@ -25,4 +31,8 @@ public ContextualProfileBreakdown(Class clazz) { * @return contextual profile breakdown instance */ public abstract AbstractProfileBreakdown context(Object context); + + public void associateCollectorToLeaves(Collector collector, LeafReaderContext leaf) {} + + public void associateCollectorsToLeaves(Map> collectorToLeaves) {} } diff --git a/server/src/main/java/org/opensearch/search/profile/Profilers.java b/server/src/main/java/org/opensearch/search/profile/Profilers.java index 8e87c7ff4acd4..68cf05c988b5b 100644 --- a/server/src/main/java/org/opensearch/search/profile/Profilers.java +++ b/server/src/main/java/org/opensearch/search/profile/Profilers.java @@ -35,6 +35,9 @@ import org.opensearch.search.internal.ContextIndexSearcher; import org.opensearch.search.profile.aggregation.AggregationProfiler; import org.opensearch.search.profile.aggregation.ConcurrentAggregationProfiler; +import org.opensearch.search.profile.query.ConcurrentQueryProfileTree; +import org.opensearch.search.profile.query.ConcurrentQueryProfiler; +import org.opensearch.search.profile.query.InternalQueryProfileTree; import org.opensearch.search.profile.query.QueryProfiler; import java.util.ArrayList; @@ -64,7 +67,9 @@ public Profilers(ContextIndexSearcher searcher, boolean isConcurrentSegmentSearc /** Switch to a new profile. */ public QueryProfiler addQueryProfiler() { - QueryProfiler profiler = new QueryProfiler(isConcurrentSegmentSearchEnabled); + QueryProfiler profiler = isConcurrentSegmentSearchEnabled + ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); queryProfilers.add(profiler); return profiler; diff --git a/server/src/main/java/org/opensearch/search/profile/Timer.java b/server/src/main/java/org/opensearch/search/profile/Timer.java index 172762cabeb6a..864c689cf7fa0 100644 --- a/server/src/main/java/org/opensearch/search/profile/Timer.java +++ b/server/src/main/java/org/opensearch/search/profile/Timer.java @@ -53,6 +53,18 @@ public class Timer { private boolean doTiming; private long timing, count, lastCount, start, earliestTimerStartTime; + public Timer() { + this(0, 0, 0, 0, 0); + } + + public Timer(long timing, long count, long lastCount, long start, long earliestTimerStartTime) { + this.timing = timing; + this.count = count; + this.lastCount = lastCount; + this.start = start; + this.earliestTimerStartTime = earliestTimerStartTime; + } + /** pkg-private for testing */ long nanoTime() { return System.nanoTime(); diff --git a/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileBreakdown.java index d0c67915e6d8d..9fa628f107f7b 100644 --- a/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/aggregation/AggregationProfileBreakdown.java @@ -34,7 +34,6 @@ import org.opensearch.search.profile.AbstractProfileBreakdown; -import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -60,21 +59,7 @@ public void addDebugInfo(String key, Object value) { } @Override - protected Map toDebugMap() { + public Map toDebugMap() { return unmodifiableMap(extra); } - - /** - * Build a timing count startTime breakdown for aggregation timing types - */ - @Override - public Map toBreakdownMap() { - Map map = new HashMap<>(timings.length * 3); - for (AggregationTimingType 
timingType : timingTypes) { - map.put(timingType.toString(), timings[timingType.ordinal()].getApproximateTiming()); - map.put(timingType + TIMING_TYPE_COUNT_SUFFIX, timings[timingType.ordinal()].getCount()); - map.put(timingType + TIMING_TYPE_START_TIME_SUFFIX, timings[timingType.ordinal()].getEarliestTimerStartTime()); - } - return Collections.unmodifiableMap(map); - } } diff --git a/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java b/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java new file mode 100644 index 0000000000000..2f5d632ee2d87 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/profile/query/AbstractQueryProfileTree.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.profile.query; + +import org.apache.lucene.search.Query; +import org.opensearch.search.profile.AbstractInternalProfileTree; +import org.opensearch.search.profile.ContextualProfileBreakdown; +import org.opensearch.search.profile.ProfileResult; + +/** + * This class tracks the dependency tree for queries (scoring and rewriting) and + * generates {@link QueryProfileBreakdown} for each node in the tree. It also finalizes the tree + * and returns a list of {@link ProfileResult} that can be serialized back to the client + * + * @opensearch.internal + */ +public abstract class AbstractQueryProfileTree extends AbstractInternalProfileTree, Query> { + + /** Rewrite time */ + private long rewriteTime; + private long rewriteScratch; + + @Override + protected String getTypeFromElement(Query query) { + // Anonymous classes won't have a name, + // we need to get the super class + if (query.getClass().getSimpleName().isEmpty()) { + return query.getClass().getSuperclass().getSimpleName(); + } + return query.getClass().getSimpleName(); + } + + @Override + protected String getDescriptionFromElement(Query query) { + return query.toString(); + } + + /** + * Begin timing a query for a specific Timing context + */ + public void startRewriteTime() { + assert rewriteScratch == 0; + rewriteScratch = System.nanoTime(); + } + + /** + * Halt the timing process and add the elapsed rewriting time. 
+ * startRewriteTime() must be called for a particular context prior to calling + * stopAndAddRewriteTime(), otherwise the elapsed time will be negative and + * nonsensical + */ + public void stopAndAddRewriteTime() { + long time = Math.max(1, System.nanoTime() - rewriteScratch); + rewriteTime += time; + rewriteScratch = 0; + } + + public long getRewriteTime() { + return rewriteTime; + } +} diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java index 6f0c78e8b307d..59ef01f9f947a 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java +++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdown.java @@ -8,10 +8,16 @@ package org.opensearch.search.profile.query; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; +import org.opensearch.OpenSearchException; import org.opensearch.search.profile.AbstractProfileBreakdown; import org.opensearch.search.profile.ContextualProfileBreakdown; +import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -23,8 +29,22 @@ * @opensearch.internal */ public final class ConcurrentQueryProfileBreakdown extends ContextualProfileBreakdown { + static final String SLICE_END_TIME_SUFFIX = "_slice_end_time"; + static final String SLICE_START_TIME_SUFFIX = "_slice_start_time"; + static final String MAX_PREFIX = "max_"; + static final String MIN_PREFIX = "min_"; + static final String AVG_PREFIX = "avg_"; + private long queryNodeTime = Long.MIN_VALUE; + private long maxSliceNodeTime = Long.MIN_VALUE; + private long minSliceNodeTime = Long.MAX_VALUE; + private long avgSliceNodeTime = 0L; + + // keep track of all breakdown timings per segment. package-private for testing private final Map> contexts = new ConcurrentHashMap<>(); + // represents slice to leaves mapping as for each slice a unique collector instance is created + private final Map> sliceCollectorsToLeaves = new ConcurrentHashMap<>(); + /** Sole constructor. */ public ConcurrentQueryProfileBreakdown() { super(QueryTimingType.class); @@ -44,14 +64,334 @@ public AbstractProfileBreakdown context(Object context) { @Override public Map toBreakdownMap() { - final Map map = new HashMap<>(super.toBreakdownMap()); + final Map topLevelBreakdownMapWithWeightTime = super.toBreakdownMap(); + final long createWeightStartTime = topLevelBreakdownMapWithWeightTime.get( + QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_START_TIME_SUFFIX + ); + final long createWeightTime = topLevelBreakdownMapWithWeightTime.get(QueryTimingType.CREATE_WEIGHT.toString()); + + if (contexts.isEmpty()) { + // If there are no leaf contexts, then return the default concurrent query level breakdown, which will include the + // create_weight time/count + queryNodeTime = createWeightTime; + maxSliceNodeTime = 0L; + minSliceNodeTime = 0L; + avgSliceNodeTime = 0L; + return buildDefaultQueryBreakdownMap(createWeightTime); + } else if (sliceCollectorsToLeaves.isEmpty()) { + // This will happen when each slice executes search leaf for its leaves and query is rewritten for the leaf being searched. It + // creates a new weight and breakdown map for each rewritten query. This new breakdown map captures the timing information for + // the new rewritten query. 
The sliceCollectorsToLeaves is empty because this breakdown for rewritten query gets created later + // in search leaf path which doesn't have collector. Also, this is not needed since this breakdown is per leaf and there is no + // concurrency involved. An empty sliceCollectorsToLeaves could also happen in the case of early termination. + AbstractProfileBreakdown breakdown = contexts.values().iterator().next(); + queryNodeTime = breakdown.toNodeTime() + createWeightTime; + maxSliceNodeTime = 0L; + minSliceNodeTime = 0L; + avgSliceNodeTime = 0L; + Map queryBreakdownMap = new HashMap<>(breakdown.toBreakdownMap()); + queryBreakdownMap.put(QueryTimingType.CREATE_WEIGHT.toString(), createWeightTime); + queryBreakdownMap.put(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX, 1L); + return queryBreakdownMap; + } + + // first create the slice level breakdowns + final Map> sliceLevelBreakdowns = buildSliceLevelBreakdown(); + return buildQueryBreakdownMap(sliceLevelBreakdowns, createWeightTime, createWeightStartTime); + } + + /** + * @param createWeightTime time for creating weight + * @return default breakdown map for concurrent query which includes the create weight time and all other timing type stats in the + * breakdown has default value of 0. For concurrent search case, the max/min/avg stats for each timing type will also be 0 in this + * default breakdown map. + */ + private Map buildDefaultQueryBreakdownMap(long createWeightTime) { + final Map concurrentQueryBreakdownMap = new HashMap<>(); + for (QueryTimingType timingType : QueryTimingType.values()) { + final String timingTypeKey = timingType.toString(); + final String timingTypeCountKey = timingTypeKey + TIMING_TYPE_COUNT_SUFFIX; - for (final AbstractProfileBreakdown context : contexts.values()) { - for (final Map.Entry entry : context.toBreakdownMap().entrySet()) { - map.merge(entry.getKey(), entry.getValue(), Long::sum); + if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { + concurrentQueryBreakdownMap.put(timingTypeKey, createWeightTime); + concurrentQueryBreakdownMap.put(timingTypeCountKey, 1L); + continue; } + final String maxBreakdownTypeTime = MAX_PREFIX + timingTypeKey; + final String minBreakdownTypeTime = MIN_PREFIX + timingTypeKey; + final String avgBreakdownTypeTime = AVG_PREFIX + timingTypeKey; + final String maxBreakdownTypeCount = MAX_PREFIX + timingTypeCountKey; + final String minBreakdownTypeCount = MIN_PREFIX + timingTypeCountKey; + final String avgBreakdownTypeCount = AVG_PREFIX + timingTypeCountKey; + // add time related stats + concurrentQueryBreakdownMap.put(timingTypeKey, 0L); + concurrentQueryBreakdownMap.put(maxBreakdownTypeTime, 0L); + concurrentQueryBreakdownMap.put(minBreakdownTypeTime, 0L); + concurrentQueryBreakdownMap.put(avgBreakdownTypeTime, 0L); + // add count related stats + concurrentQueryBreakdownMap.put(timingTypeCountKey, 0L); + concurrentQueryBreakdownMap.put(maxBreakdownTypeCount, 0L); + concurrentQueryBreakdownMap.put(minBreakdownTypeCount, 0L); + concurrentQueryBreakdownMap.put(avgBreakdownTypeCount, 0L); } + return concurrentQueryBreakdownMap; + } + + /** + * Computes the slice level breakdownMap. It uses sliceCollectorsToLeaves to figure out all the leaves or segments part of a slice. + * Then use the breakdown timing stats for each of these leaves to calculate the breakdown stats at slice level. 
+ * + * @return map of collector (or slice) to breakdown map + */ + Map> buildSliceLevelBreakdown() { + final Map> sliceLevelBreakdowns = new HashMap<>(); + long totalSliceNodeTime = 0L; + for (Map.Entry> slice : sliceCollectorsToLeaves.entrySet()) { + final Collector sliceCollector = slice.getKey(); + // initialize each slice level breakdown + final Map currentSliceBreakdown = sliceLevelBreakdowns.computeIfAbsent(sliceCollector, k -> new HashMap<>()); + // max slice end time across all timing types + long sliceMaxEndTime = Long.MIN_VALUE; + long sliceMinStartTime = Long.MAX_VALUE; + for (QueryTimingType timingType : QueryTimingType.values()) { + if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { + // do nothing for create weight as that is query level time and not slice level + continue; + } + + // for each timing type compute maxSliceEndTime and minSliceStartTime. Also add the counts of timing type to + // compute total count at slice level + final String timingTypeCountKey = timingType + TIMING_TYPE_COUNT_SUFFIX; + final String timingTypeStartKey = timingType + TIMING_TYPE_START_TIME_SUFFIX; + final String timingTypeSliceStartTimeKey = timingType + SLICE_START_TIME_SUFFIX; + final String timingTypeSliceEndTimeKey = timingType + SLICE_END_TIME_SUFFIX; + + for (LeafReaderContext sliceLeaf : slice.getValue()) { + if (!contexts.containsKey(sliceLeaf)) { + // In case like early termination, the sliceCollectorToLeave association will be added for a + // leaf, but the leaf level breakdown will not be created in the contexts map. + // This is because before updating the contexts map, the query hits earlyTerminationException. + // To handle such case, we will ignore the leaf that is not present. + // + // Other than early termination, it can also happen in other cases. For example: there is a must boolean query + // with 2 boolean clauses. While creating scorer for first clause if no docs are found for the field in a leaf + // context then it will return null scorer. Then for 2nd clause weight as well no scorer will be created for this + // leaf context (as it is a must query). Due to this it will end up missing the leaf context in the contexts map + // for second clause weight. + continue; + } + final Map currentSliceLeafBreakdownMap = contexts.get(sliceLeaf).toBreakdownMap(); + // get the count for current leaf timing type + currentSliceBreakdown.compute( + timingTypeCountKey, + (key, value) -> (value == null) + ? currentSliceLeafBreakdownMap.get(timingTypeCountKey) + : value + currentSliceLeafBreakdownMap.get(timingTypeCountKey) + ); + + // compute the sliceEndTime for timingType using max of endTime across slice leaves + final long sliceLeafTimingTypeEndTime = currentSliceLeafBreakdownMap.get(timingTypeStartKey) + + currentSliceLeafBreakdownMap.get(timingType.toString()); + currentSliceBreakdown.compute( + timingTypeSliceEndTimeKey, + (key, value) -> (value == null) ? sliceLeafTimingTypeEndTime : Math.max(value, sliceLeafTimingTypeEndTime) + ); + + // compute the sliceStartTime for timingType using min of startTime across slice leaves + final long sliceLeafTimingTypeStartTime = currentSliceLeafBreakdownMap.get(timingTypeStartKey); + currentSliceBreakdown.compute( + timingTypeSliceStartTimeKey, + (key, value) -> (value == null) ? 
sliceLeafTimingTypeStartTime : Math.min(value, sliceLeafTimingTypeStartTime) + ); + } + // compute sliceMaxEndTime as max of sliceEndTime across all timing types + sliceMaxEndTime = Math.max(sliceMaxEndTime, currentSliceBreakdown.getOrDefault(timingTypeSliceEndTimeKey, Long.MIN_VALUE)); + long currentSliceStartTime = currentSliceBreakdown.getOrDefault(timingTypeSliceStartTimeKey, Long.MAX_VALUE); + if (currentSliceStartTime == 0L) { + // The timer for the current timing type never starts, so we continue here + continue; + } + sliceMinStartTime = Math.min(sliceMinStartTime, currentSliceStartTime); + // compute total time for each timing type at slice level using sliceEndTime and sliceStartTime + currentSliceBreakdown.put( + timingType.toString(), + currentSliceBreakdown.getOrDefault(timingTypeSliceEndTimeKey, 0L) - currentSliceBreakdown.getOrDefault( + timingTypeSliceStartTimeKey, + 0L + ) + ); + } + // currentSliceNodeTime does not include the create weight time, as that is computed in non-concurrent part + long currentSliceNodeTime; + if (sliceMinStartTime == Long.MAX_VALUE && sliceMaxEndTime == Long.MIN_VALUE) { + currentSliceNodeTime = 0L; + } else if (sliceMinStartTime == Long.MAX_VALUE || sliceMaxEndTime == Long.MIN_VALUE) { + throw new OpenSearchException( + "Unexpected value of sliceMinStartTime [" + + sliceMinStartTime + + "] or sliceMaxEndTime [" + + sliceMaxEndTime + + "] while computing the slice level timing profile breakdowns" + ); + } else { + currentSliceNodeTime = sliceMaxEndTime - sliceMinStartTime; + } + + // compute max/min slice times + maxSliceNodeTime = Math.max(maxSliceNodeTime, currentSliceNodeTime); + minSliceNodeTime = Math.min(minSliceNodeTime, currentSliceNodeTime); + // total time at query level + totalSliceNodeTime += currentSliceNodeTime; + } + avgSliceNodeTime = totalSliceNodeTime / sliceCollectorsToLeaves.size(); + return sliceLevelBreakdowns; + } + + /** + * Computes the query level breakdownMap using the breakdown maps of all the slices. In query level breakdown map, it has the + * time/count stats for each breakdown type. Total time per breakdown type at query level is computed by subtracting the max of slice + * end time with min of slice start time for that type. Count for each breakdown type at query level is sum of count of that type + * across slices. 
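A compact sketch of the cross-slice statistics described above; the names are hypothetical, but the integer division for the average mirrors how the breakdown maps in this file compute avg_ values:

    import java.util.Collection;

    final class CrossSliceStatsSketch {
        // Reduces the per-slice values of one timing type into {max, min, avg}, the same
        // shape as the max_/min_/avg_ prefixed keys added to the query breakdown map.
        static long[] maxMinAvg(Collection<Long> perSliceValues) {
            if (perSliceValues.isEmpty()) {
                return new long[] { 0L, 0L, 0L };
            }
            long max = Long.MIN_VALUE;
            long min = Long.MAX_VALUE;
            long sum = 0L;
            for (long value : perSliceValues) {
                max = Math.max(max, value);
                min = Math.min(min, value);
                sum += value;
            }
            return new long[] { max, min, sum / perSliceValues.size() };
        }
    }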
Other than these, there are max/min/avg stats across slices for each breakdown type + * + * @param sliceLevelBreakdowns breakdown map for all the slices + * @param createWeightTime time for create weight + * @param createWeightStartTime start time for create weight + * @return breakdown map for entire query + */ + public Map buildQueryBreakdownMap( + Map> sliceLevelBreakdowns, + long createWeightTime, + long createWeightStartTime + ) { + final Map queryBreakdownMap = new HashMap<>(); + long queryEndTime = Long.MIN_VALUE; + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + final String timingTypeKey = queryTimingType.toString(); + final String timingTypeCountKey = timingTypeKey + TIMING_TYPE_COUNT_SUFFIX; + final String sliceEndTimeForTimingType = timingTypeKey + SLICE_END_TIME_SUFFIX; + final String sliceStartTimeForTimingType = timingTypeKey + SLICE_START_TIME_SUFFIX; + + final String maxBreakdownTypeTime = MAX_PREFIX + timingTypeKey; + final String minBreakdownTypeTime = MIN_PREFIX + timingTypeKey; + final String avgBreakdownTypeTime = AVG_PREFIX + timingTypeKey; + final String maxBreakdownTypeCount = MAX_PREFIX + timingTypeCountKey; + final String minBreakdownTypeCount = MIN_PREFIX + timingTypeCountKey; + final String avgBreakdownTypeCount = AVG_PREFIX + timingTypeCountKey; + + long queryTimingTypeEndTime = Long.MIN_VALUE; + long queryTimingTypeStartTime = Long.MAX_VALUE; + long queryTimingTypeCount = 0L; + + // the create weight time is computed at the query level and is called only once per query + if (queryTimingType == QueryTimingType.CREATE_WEIGHT) { + queryBreakdownMap.put(timingTypeCountKey, 1L); + queryBreakdownMap.put(timingTypeKey, createWeightTime); + continue; + } + + // for all other timing types, we will compute min/max/avg/total across slices + for (Map.Entry> sliceBreakdown : sliceLevelBreakdowns.entrySet()) { + long sliceBreakdownTypeTime = sliceBreakdown.getValue().getOrDefault(timingTypeKey, 0L); + long sliceBreakdownTypeCount = sliceBreakdown.getValue().getOrDefault(timingTypeCountKey, 0L); + // compute max/min/avg TimingType time across slices + queryBreakdownMap.compute( + maxBreakdownTypeTime, + (key, value) -> (value == null) ? sliceBreakdownTypeTime : Math.max(sliceBreakdownTypeTime, value) + ); + queryBreakdownMap.compute( + minBreakdownTypeTime, + (key, value) -> (value == null) ? sliceBreakdownTypeTime : Math.min(sliceBreakdownTypeTime, value) + ); + queryBreakdownMap.compute( + avgBreakdownTypeTime, + (key, value) -> (value == null) ? sliceBreakdownTypeTime : sliceBreakdownTypeTime + value + ); + + // compute max/min/avg TimingType count across slices + queryBreakdownMap.compute( + maxBreakdownTypeCount, + (key, value) -> (value == null) ? sliceBreakdownTypeCount : Math.max(sliceBreakdownTypeCount, value) + ); + queryBreakdownMap.compute( + minBreakdownTypeCount, + (key, value) -> (value == null) ? sliceBreakdownTypeCount : Math.min(sliceBreakdownTypeCount, value) + ); + queryBreakdownMap.compute( + avgBreakdownTypeCount, + (key, value) -> (value == null) ? 
sliceBreakdownTypeCount : sliceBreakdownTypeCount + value + ); + + // query start/end time for a TimingType is min/max of start/end time across slices for that TimingType + queryTimingTypeEndTime = Math.max( + queryTimingTypeEndTime, + sliceBreakdown.getValue().getOrDefault(sliceEndTimeForTimingType, Long.MIN_VALUE) + ); + queryTimingTypeStartTime = Math.min( + queryTimingTypeStartTime, + sliceBreakdown.getValue().getOrDefault(sliceStartTimeForTimingType, Long.MAX_VALUE) + ); + queryTimingTypeCount += sliceBreakdownTypeCount; + } + + if (queryTimingTypeStartTime == Long.MAX_VALUE || queryTimingTypeEndTime == Long.MIN_VALUE) { + throw new OpenSearchException( + "Unexpected timing type [" + + timingTypeKey + + "] start [" + + queryTimingTypeStartTime + + "] or end time [" + + queryTimingTypeEndTime + + "] computed across slices for profile results" + ); + } + queryBreakdownMap.put(timingTypeKey, queryTimingTypeEndTime - queryTimingTypeStartTime); + queryBreakdownMap.put(timingTypeCountKey, queryTimingTypeCount); + queryBreakdownMap.compute(avgBreakdownTypeTime, (key, value) -> (value == null) ? 0L : value / sliceLevelBreakdowns.size()); + queryBreakdownMap.compute(avgBreakdownTypeCount, (key, value) -> (value == null) ? 0L : value / sliceLevelBreakdowns.size()); + // compute query end time using max of query end time across all timing types + queryEndTime = Math.max(queryEndTime, queryTimingTypeEndTime); + } + if (queryEndTime == Long.MIN_VALUE) { + throw new OpenSearchException("Unexpected error while computing the query end time across slices in profile result"); + } + queryNodeTime = queryEndTime - createWeightStartTime; + return queryBreakdownMap; + } + + @Override + public long toNodeTime() { + return queryNodeTime; + } + + @Override + public void associateCollectorToLeaves(Collector collector, LeafReaderContext leaf) { + // Each slice (or collector) is executed by single thread. So the list for a key will always be updated by a single thread only + sliceCollectorsToLeaves.computeIfAbsent(collector, k -> new ArrayList<>()).add(leaf); + } + + @Override + public void associateCollectorsToLeaves(Map> collectorsToLeaves) { + sliceCollectorsToLeaves.putAll(collectorsToLeaves); + } + + Map> getSliceCollectorsToLeaves() { + return Collections.unmodifiableMap(sliceCollectorsToLeaves); + } + + // used by tests + Map> getContexts() { + return contexts; + } + + long getMaxSliceNodeTime() { + return maxSliceNodeTime; + } + + long getMinSliceNodeTime() { + return minSliceNodeTime; + } - return map; + long getAvgSliceNodeTime() { + return avgSliceNodeTime; } } diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileTree.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileTree.java new file mode 100644 index 0000000000000..4e54178c3b4fb --- /dev/null +++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfileTree.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.profile.query; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; +import org.opensearch.search.profile.ContextualProfileBreakdown; +import org.opensearch.search.profile.ProfileResult; + +import java.util.List; +import java.util.Map; + +/** + * This class returns a list of {@link ProfileResult} that can be serialized back to the client in the concurrent execution. + * + * @opensearch.internal + */ +public class ConcurrentQueryProfileTree extends AbstractQueryProfileTree { + + @Override + protected ContextualProfileBreakdown createProfileBreakdown() { + return new ConcurrentQueryProfileBreakdown(); + } + + @Override + protected ProfileResult createProfileResult( + String type, + String description, + ContextualProfileBreakdown breakdown, + List childrenProfileResults + ) { + assert breakdown instanceof ConcurrentQueryProfileBreakdown; + final ConcurrentQueryProfileBreakdown concurrentBreakdown = (ConcurrentQueryProfileBreakdown) breakdown; + return new ProfileResult( + type, + description, + concurrentBreakdown.toBreakdownMap(), + concurrentBreakdown.toDebugMap(), + concurrentBreakdown.toNodeTime(), + childrenProfileResults, + concurrentBreakdown.getMaxSliceNodeTime(), + concurrentBreakdown.getMinSliceNodeTime(), + concurrentBreakdown.getAvgSliceNodeTime() + ); + } + + /** + * For concurrent query case, when there are nested queries (with children), then the {@link ConcurrentQueryProfileBreakdown} created + * for the child queries weight doesn't have the association of collector to leaves. This is because child query weights are not + * exposed by the {@link org.apache.lucene.search.Weight} interface. So after all the collection is happened and before the result + * tree is created we need to pass the association from parent to the child breakdowns. 
This will be then used to create the + * breakdown map at slice level for the child queries as well + * + * @return a hierarchical representation of the profiled query tree + */ + @Override + public List getTree() { + for (Integer root : roots) { + final ContextualProfileBreakdown parentBreakdown = breakdowns.get(root); + assert parentBreakdown instanceof ConcurrentQueryProfileBreakdown; + final Map> parentCollectorToLeaves = ((ConcurrentQueryProfileBreakdown) parentBreakdown) + .getSliceCollectorsToLeaves(); + // update all the children with the parent collectorToLeaves association + updateCollectorToLeavesForChildBreakdowns(root, parentCollectorToLeaves); + } + // once the collector to leaves mapping is updated, get the result + return super.getTree(); + } + + /** + * Updates the children with collector to leaves mapping as recorded by parent breakdown + * @param parentToken parent token number in the tree + * @param collectorToLeaves collector to leaves mapping recorded by parent + */ + private void updateCollectorToLeavesForChildBreakdowns(Integer parentToken, Map> collectorToLeaves) { + final List children = tree.get(parentToken); + if (children != null) { + for (Integer currentChild : children) { + final ContextualProfileBreakdown currentChildBreakdown = breakdowns.get(currentChild); + currentChildBreakdown.associateCollectorsToLeaves(collectorToLeaves); + updateCollectorToLeavesForChildBreakdowns(currentChild, collectorToLeaves); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java new file mode 100644 index 0000000000000..42bf23bb13fbe --- /dev/null +++ b/server/src/main/java/org/opensearch/search/profile/query/ConcurrentQueryProfiler.java @@ -0,0 +1,134 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.profile.query; + +import org.apache.lucene.search.Query; +import org.opensearch.search.profile.ContextualProfileBreakdown; +import org.opensearch.search.profile.ProfileResult; +import org.opensearch.search.profile.Timer; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * This class acts as a thread-local storage for profiling a query with concurrent execution + * + * @opensearch.internal + */ +public final class ConcurrentQueryProfiler extends QueryProfiler { + + private final Map threadToProfileTree; + // The LinkedList does not need to be thread safe, as the map associates thread IDs with LinkedList, and only + // one thread will access the LinkedList at a time. 
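Since each thread records its own rewrite timers, simply summing timer durations would double-count rewrites that ran concurrently on different threads; the profiler instead merges overlapping [start, end] intervals before summing, as getRewriteTime() and mergeRewriteTimeIntervals() do further below. A standalone sketch of that merge, using plain long[] {start, end} pairs instead of Timer objects (an assumed simplification, not the production code):

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    final class RewriteIntervalSketch {
        // Sorts intervals by start time, merges the ones that overlap, and returns
        // the total wall-clock time covered by the merged intervals.
        static long totalCoveredTime(List<long[]> intervals) {
            intervals.sort(Comparator.comparingLong(i -> i[0]));
            List<long[]> merged = new ArrayList<>();
            for (long[] current : intervals) {
                if (merged.isEmpty() || merged.get(merged.size() - 1)[1] < current[0]) {
                    merged.add(new long[] { current[0], current[1] });
                } else {
                    long[] last = merged.get(merged.size() - 1);
                    last[1] = Math.max(last[1], current[1]);
                }
            }
            long total = 0L;
            for (long[] interval : merged) {
                total += interval[1] - interval[0];
            }
            return total;
        }

        public static void main(String[] args) {
            List<long[]> timers = new ArrayList<>();
            timers.add(new long[] { 0, 10 });
            timers.add(new long[] { 5, 20 }); // overlaps the first timer
            timers.add(new long[] { 30, 40 });
            System.out.println(totalCoveredTime(timers)); // 30, not 35 (10 + 15 + 10)
        }
    }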
+ private final Map> threadToRewriteTimers; + + public ConcurrentQueryProfiler(AbstractQueryProfileTree profileTree) { + super(profileTree); + long threadId = getCurrentThreadId(); + // We utilize LinkedHashMap to preserve the insertion order of the profiled queries + threadToProfileTree = Collections.synchronizedMap(new LinkedHashMap<>()); + threadToProfileTree.put(threadId, (ConcurrentQueryProfileTree) profileTree); + threadToRewriteTimers = new ConcurrentHashMap<>(); + threadToRewriteTimers.put(threadId, new LinkedList<>()); + } + + @Override + public ContextualProfileBreakdown getQueryBreakdown(Query query) { + ConcurrentQueryProfileTree profileTree = threadToProfileTree.computeIfAbsent( + getCurrentThreadId(), + k -> new ConcurrentQueryProfileTree() + ); + return profileTree.getProfileBreakdown(query); + } + + /** + * Removes the last (e.g. most recent) element on ConcurrentQueryProfileTree stack. + */ + @Override + public void pollLastElement() { + ConcurrentQueryProfileTree concurrentProfileTree = threadToProfileTree.get(getCurrentThreadId()); + if (concurrentProfileTree != null) { + concurrentProfileTree.pollLast(); + } + } + + /** + * @return a hierarchical representation of the profiled tree + */ + @Override + public List getTree() { + List profileResults = new ArrayList<>(); + for (Map.Entry profile : threadToProfileTree.entrySet()) { + profileResults.addAll(profile.getValue().getTree()); + } + return profileResults; + } + + /** + * Begin timing the rewrite phase of a request + */ + @Override + public void startRewriteTime() { + Timer rewriteTimer = new Timer(); + threadToRewriteTimers.computeIfAbsent(getCurrentThreadId(), k -> new LinkedList<>()).add(rewriteTimer); + rewriteTimer.start(); + } + + /** + * Stop recording the current rewrite timer + */ + public void stopAndAddRewriteTime() { + Timer rewriteTimer = threadToRewriteTimers.get(getCurrentThreadId()).getLast(); + rewriteTimer.stop(); + } + + /** + * @return total time taken to rewrite all queries in this concurrent query profiler + */ + @Override + public long getRewriteTime() { + long totalRewriteTime = 0L; + List rewriteTimers = new LinkedList<>(); + threadToRewriteTimers.values().forEach(rewriteTimers::addAll); + LinkedList mergedIntervals = mergeRewriteTimeIntervals(rewriteTimers); + for (long[] interval : mergedIntervals) { + totalRewriteTime += interval[1] - interval[0]; + } + return totalRewriteTime; + } + + // package private for unit testing + LinkedList mergeRewriteTimeIntervals(List timers) { + LinkedList mergedIntervals = new LinkedList<>(); + timers.sort(Comparator.comparingLong(Timer::getEarliestTimerStartTime)); + for (Timer timer : timers) { + long startTime = timer.getEarliestTimerStartTime(); + long endTime = startTime + timer.getApproximateTiming(); + if (mergedIntervals.isEmpty() || mergedIntervals.getLast()[1] < startTime) { + long[] interval = new long[2]; + interval[0] = startTime; + interval[1] = endTime; + mergedIntervals.add(interval); + } else { + mergedIntervals.getLast()[1] = Math.max(mergedIntervals.getLast()[1], endTime); + } + } + return mergedIntervals; + } + + private long getCurrentThreadId() { + return Thread.currentThread().getId(); + } +} diff --git a/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollector.java b/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollector.java index 8b860c3a58cea..024d91a8e2ed2 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollector.java +++ 
b/server/src/main/java/org/opensearch/search/profile/query/InternalProfileCollector.java @@ -44,11 +44,12 @@ /** * This class wraps a Lucene Collector and times the execution of: - * - setScorer() - * - collect() - * - doSetNextReader() - * - needsScores() - * + *
<ul>
+ * <li>setScorer()</li>
+ * <li>collect()</li>
+ * <li>doSetNextReader()</li>
+ * <li>needsScores()</li>
+ * </ul>
            * InternalProfiler facilitates the linking of the Collector graph * * @opensearch.internal @@ -117,7 +118,7 @@ public Collector getCollector() { /** * Creates a human-friendly representation of the Collector name. - * + *
<p>
            * InternalBucket Collectors use the aggregation name in their toString() method, * which makes the profiled output a bit nicer. * diff --git a/server/src/main/java/org/opensearch/search/profile/query/InternalQueryProfileTree.java b/server/src/main/java/org/opensearch/search/profile/query/InternalQueryProfileTree.java index 40c6a29384491..1ed367f094fb7 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/InternalQueryProfileTree.java +++ b/server/src/main/java/org/opensearch/search/profile/query/InternalQueryProfileTree.java @@ -32,73 +32,18 @@ package org.opensearch.search.profile.query; -import org.apache.lucene.search.Query; -import org.opensearch.search.profile.AbstractInternalProfileTree; import org.opensearch.search.profile.ContextualProfileBreakdown; import org.opensearch.search.profile.ProfileResult; /** - * This class tracks the dependency tree for queries (scoring and rewriting) and - * generates {@link QueryProfileBreakdown} for each node in the tree. It also finalizes the tree - * and returns a list of {@link ProfileResult} that can be serialized back to the client + * This class returns a list of {@link ProfileResult} that can be serialized back to the client in the non-concurrent execution. * * @opensearch.internal */ -final class InternalQueryProfileTree extends AbstractInternalProfileTree, Query> { - - /** Rewrite time */ - private long rewriteTime; - private long rewriteScratch; - private final boolean concurrent; - - InternalQueryProfileTree(boolean concurrent) { - this.concurrent = concurrent; - } +public class InternalQueryProfileTree extends AbstractQueryProfileTree { @Override protected ContextualProfileBreakdown createProfileBreakdown() { - return (concurrent) ? new ConcurrentQueryProfileBreakdown() : new QueryProfileBreakdown(); - } - - @Override - protected String getTypeFromElement(Query query) { - // Anonymous classes won't have a name, - // we need to get the super class - if (query.getClass().getSimpleName().isEmpty()) { - return query.getClass().getSuperclass().getSimpleName(); - } - return query.getClass().getSimpleName(); - } - - @Override - protected String getDescriptionFromElement(Query query) { - return query.toString(); - } - - /** - * Begin timing a query for a specific Timing context - */ - public void startRewriteTime() { - assert rewriteScratch == 0; - rewriteScratch = System.nanoTime(); - } - - /** - * Halt the timing process and add the elapsed rewriting time. 
- * startRewriteTime() must be called for a particular context prior to calling - * stopAndAddRewriteTime(), otherwise the elapsed time will be negative and - * nonsensical - * - * @return The elapsed time - */ - public long stopAndAddRewriteTime() { - long time = Math.max(1, System.nanoTime() - rewriteScratch); - rewriteTime += time; - rewriteScratch = 0; - return time; - } - - public long getRewriteTime() { - return rewriteTime; + return new QueryProfileBreakdown(); } } diff --git a/server/src/main/java/org/opensearch/search/profile/query/ProfileWeight.java b/server/src/main/java/org/opensearch/search/profile/query/ProfileWeight.java index 12f9a7184d84a..c7e70d8d88007 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/ProfileWeight.java +++ b/server/src/main/java/org/opensearch/search/profile/query/ProfileWeight.java @@ -34,6 +34,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.BulkScorer; +import org.apache.lucene.search.Collector; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; @@ -137,4 +138,7 @@ public boolean isCacheable(LeafReaderContext ctx) { return false; } + public void associateCollectorToLeaves(LeafReaderContext leaf, Collector collector) { + profile.associateCollectorToLeaves(collector, leaf); + } } diff --git a/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java b/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java index 9527e010005c3..332c4b3551450 100644 --- a/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java +++ b/server/src/main/java/org/opensearch/search/profile/query/QueryProfiler.java @@ -44,22 +44,22 @@ * "online" as the weights are wrapped by ContextIndexSearcher. This allows us * to know the relationship between nodes in tree without explicitly * walking the tree or pre-wrapping everything - * + *
<p>
            * A Profiler is associated with every Search, not per Search-Request. E.g. a * request may execute two searches (query + global agg). A Profiler just * represents one of those * * @opensearch.internal */ -public final class QueryProfiler extends AbstractProfiler, Query> { +public class QueryProfiler extends AbstractProfiler, Query> { /** * The root Collector used in the search */ private InternalProfileComponent collector; - public QueryProfiler(boolean concurrent) { - super(new InternalQueryProfileTree(concurrent)); + public QueryProfiler(AbstractQueryProfileTree profileTree) { + super(profileTree); } /** Set the collector that is associated with this profiler. */ @@ -75,24 +75,24 @@ public void setCollector(InternalProfileComponent collector) { * single metric */ public void startRewriteTime() { - ((InternalQueryProfileTree) profileTree).startRewriteTime(); + ((AbstractQueryProfileTree) profileTree).startRewriteTime(); } /** * Stop recording the current rewrite and add it's time to the total tally, returning the * cumulative time so far. - * - * @return cumulative rewrite time */ - public long stopAndAddRewriteTime() { - return ((InternalQueryProfileTree) profileTree).stopAndAddRewriteTime(); + public void stopAndAddRewriteTime() { + ((AbstractQueryProfileTree) profileTree).stopAndAddRewriteTime(); } /** + * The rewriting process is complex and hard to display because queries can undergo significant changes. + * Instead of showing intermediate results, we display the cumulative time for the non-concurrent search case. * @return total time taken to rewrite all queries in this profile */ public long getRewriteTime() { - return ((InternalQueryProfileTree) profileTree).getRewriteTime(); + return ((AbstractQueryProfileTree) profileTree).getRewriteTime(); } /** diff --git a/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java b/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java index 9336b490a5333..631ace41090d7 100644 --- a/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java +++ b/server/src/main/java/org/opensearch/search/query/QueryPhaseSearcherWrapper.java @@ -57,10 +57,11 @@ public boolean searchWith( boolean hasFilterCollector, boolean hasTimeout ) throws IOException { - if (searchContext.isConcurrentSegmentSearchEnabled()) { - LOGGER.info("Using concurrent search over segments (experimental)"); + if (searchContext.shouldUseConcurrentSearch()) { + LOGGER.debug("Using concurrent search over segments (experimental) for request with context id {}", searchContext.id()); return concurrentQueryPhaseSearcher.searchWith(searchContext, searcher, query, collectors, hasFilterCollector, hasTimeout); } else { + LOGGER.debug("Using non-concurrent search over segments for request with context id {}", searchContext.id()); return defaultQueryPhaseSearcher.searchWith(searchContext, searcher, query, collectors, hasFilterCollector, hasTimeout); } } @@ -72,10 +73,14 @@ public boolean searchWith( */ @Override public AggregationProcessor aggregationProcessor(SearchContext searchContext) { - if (searchContext.isConcurrentSegmentSearchEnabled()) { - LOGGER.info("Using concurrent search over segments (experimental)"); + if (searchContext.shouldUseConcurrentSearch()) { + LOGGER.debug( + "Using concurrent aggregation processor over segments (experimental) for request with context id {}", + searchContext.id() + ); return concurrentQueryPhaseSearcher.aggregationProcessor(searchContext); } else { + LOGGER.debug("Using 
non-concurrent aggregation processor over segments for request with context id {}", searchContext.id()); return defaultQueryPhaseSearcher.aggregationProcessor(searchContext); } } diff --git a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java index 1b113e4ba7551..07313fc88b463 100644 --- a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java @@ -130,7 +130,7 @@ public static QuerySearchResult nullInstance() { * Returns true if the result doesn't contain any useful information. * It is used by the search action to avoid creating an empty response on * shard request that rewrites to match_no_docs. - * + *
<p>
            * TODO: Currently we need the concrete aggregators to build empty responses. This means that we cannot * build an empty response in the coordinating node so we rely on this hack to ensure that at least one shard * returns a valid empty response. We should move the ability to create empty responses to aggregation builders diff --git a/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java b/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java index 39c34f7c0d5d5..86f6e542f97d1 100644 --- a/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java +++ b/server/src/main/java/org/opensearch/search/query/TopDocsCollectorContext.java @@ -183,7 +183,9 @@ CollectorManager createManager(CollectorManager( new TotalHitCountCollectorManager(sort), trackTotalHitsUpTo, diff --git a/server/src/main/java/org/opensearch/search/rescore/Rescorer.java b/server/src/main/java/org/opensearch/search/rescore/Rescorer.java index 33f8e5e7b535d..ae025f70c95b3 100644 --- a/server/src/main/java/org/opensearch/search/rescore/Rescorer.java +++ b/server/src/main/java/org/opensearch/search/rescore/Rescorer.java @@ -41,7 +41,7 @@ /** * A query rescorer interface used to re-rank the Top-K results of a previously * executed search. - * + *
<p>
            * Subclasses should borrow heavily from {@link QueryRescorer} because it is * fairly well behaved and documents that tradeoffs that it is making. There * is also an {@code ExampleRescorer} that is worth looking at. diff --git a/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java b/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java index d7d554c058c37..856e103193463 100644 --- a/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java +++ b/server/src/main/java/org/opensearch/search/slice/DocValuesSliceQuery.java @@ -49,7 +49,7 @@ /** * A {@link SliceQuery} that uses the numeric doc values of a field to do the slicing. - * + *
<p>
            * NOTE: With deterministic field values this query can be used across different readers safely. * If updates are accepted on the field you must ensure that the same reader is used for all `slice` queries. * diff --git a/server/src/main/java/org/opensearch/search/slice/TermsSliceQuery.java b/server/src/main/java/org/opensearch/search/slice/TermsSliceQuery.java index 297020fe2fe4d..05f36b0d6f3cf 100644 --- a/server/src/main/java/org/opensearch/search/slice/TermsSliceQuery.java +++ b/server/src/main/java/org/opensearch/search/slice/TermsSliceQuery.java @@ -53,7 +53,7 @@ /** * A {@link SliceQuery} that uses the terms dictionary of a field to do the slicing. - * + *
<p>
            * NOTE: The cost of this filter is O(N*M) where N is the number of unique terms in the dictionary * and M is the average number of documents per term. * For each segment this filter enumerates the terms dictionary, computes the hash code for each term and fills diff --git a/server/src/main/java/org/opensearch/search/sort/SortAndFormats.java b/server/src/main/java/org/opensearch/search/sort/SortAndFormats.java index 272b1e9c1dc8d..e65187e558aef 100644 --- a/server/src/main/java/org/opensearch/search/sort/SortAndFormats.java +++ b/server/src/main/java/org/opensearch/search/sort/SortAndFormats.java @@ -32,6 +32,7 @@ package org.opensearch.search.sort; import org.apache.lucene.search.Sort; +import org.opensearch.cluster.metadata.DataStream; import org.opensearch.search.DocValueFormat; /** @@ -52,4 +53,13 @@ public SortAndFormats(Sort sort, DocValueFormat[] formats) { this.formats = formats; } + /** + * @return true: if sort is on timestamp field, false: otherwise + */ + public boolean isSortOnTimeSeriesField() { + return sort.getSort().length > 0 + && sort.getSort()[0].getField() != null + && sort.getSort()[0].getField().equals(DataStream.TIMESERIES_FIELDNAME); + } + } diff --git a/server/src/main/java/org/opensearch/search/suggest/Suggest.java b/server/src/main/java/org/opensearch/search/suggest/Suggest.java index 89884fe030564..d550a171c9c29 100644 --- a/server/src/main/java/org/opensearch/search/suggest/Suggest.java +++ b/server/src/main/java/org/opensearch/search/suggest/Suggest.java @@ -307,7 +307,7 @@ public void addTerm(T entry) { /** * Returns a integer representing the type of the suggestion. This is used for * internal serialization over the network. - * + *
<p>
            * This class is now serialized as a NamedWriteable and this method only remains for backwards compatibility */ @Deprecated diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggester.java b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggester.java index af7b1a798a779..ed7bd709f94bc 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggester.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggester.java @@ -116,6 +116,7 @@ private static void suggest(IndexSearcher searcher, CompletionQuery query, TopSu } } } + collector.finish(); } @Override diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestion.java b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestion.java index 96e47cf7c8000..e3e6cad65be62 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestion.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggestion.java @@ -63,14 +63,14 @@ /** * Suggestion response for {@link CompletionSuggester} results - * + *
<p>
            * Response format for each entry: * { * "text" : STRING * "score" : FLOAT * "contexts" : CONTEXTS * } - * + *
<p>
            * CONTEXTS : { * "CONTEXT_NAME" : ARRAY, * .. diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/FuzzyOptions.java b/server/src/main/java/org/opensearch/search/suggest/completion/FuzzyOptions.java index 56cc8fbfbcf66..139742f84b80b 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/FuzzyOptions.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/FuzzyOptions.java @@ -142,7 +142,7 @@ public int getEditDistance() { /** * Returns if transpositions option is set - * + *
<p>
            * if transpositions is set, then swapping one character for another counts as one edit instead of two. */ public boolean isTranspositions() { diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/TopSuggestGroupDocsCollector.java b/server/src/main/java/org/opensearch/search/suggest/completion/TopSuggestGroupDocsCollector.java index 4fbd661037aa9..fbc39536502de 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/TopSuggestGroupDocsCollector.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/TopSuggestGroupDocsCollector.java @@ -43,7 +43,7 @@ /** * * Extension of the {@link TopSuggestDocsCollector} that returns top documents from the completion suggester. - * + *
<p>
            * This collector groups suggestions coming from the same document but matching different contexts * or surface form together. When different contexts or surface forms match the same suggestion form only * the best one per document (sorted by weight) is kept. @@ -55,7 +55,7 @@ class TopSuggestGroupDocsCollector extends TopSuggestDocsCollector { /** * Sole constructor - * + *
<p>
            * Collects at most num completions * with corresponding document and weight */ diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMapping.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMapping.java index 61e60293f4943..94707ff2b4569 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMapping.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMapping.java @@ -54,7 +54,7 @@ /** * A {@link ContextMapping} defines criteria that can be used to * filter and/or boost suggestions at query time for {@link CompletionFieldMapper}. - * + *
<p>
            * Implementations have to define how contexts are parsed at query/index time * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMappings.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMappings.java index 0f5781fefcf07..cff5a901a473f 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMappings.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/ContextMappings.java @@ -124,11 +124,11 @@ public Iterator> iterator() { * Field prepends context values with a suggestion * Context values are associated with a type, denoted by * a type id, which is prepended to the context value. - * + *
<p>
            * Every defined context mapping yields a unique type id (index of the * corresponding context mapping in the context mappings list) * for all its context values - * + *
<p>
            * The type, context and suggestion values are encoded as follows: *
<p>
            * TYPE_ID | CONTEXT_VALUE | CONTEXT_SEP | SUGGESTION_VALUE @@ -209,7 +209,7 @@ public ContextQuery toContextQuery(CompletionQuery query, Map * see {@link org.opensearch.search.suggest.completion.context.ContextMappings.TypedContextField} * @return a map of context names and their values * @@ -232,7 +232,7 @@ public Map> getNamedContexts(List contexts) { /** * Loads {@link ContextMappings} from configuration - * + *
<p>
            * Expected configuration: * List of maps representing {@link ContextMapping} * [{"name": .., "type": .., ..}, {..}] @@ -286,7 +286,7 @@ private static String extractRequiredValue(Map contextConfig, St /** * Writes a list of objects specified by the defined {@link ContextMapping}s - * + *
<p>
            * see {@link ContextMapping#toXContent(XContentBuilder, Params)} */ @Override diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java index b73061b9e520d..6106b563dac8e 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java @@ -72,7 +72,7 @@ * The suggestions can be boosted and/or filtered depending on * whether it falls within an area, represented by a query geo hash * with a specified precision - * + *
<p>
            * {@link GeoQueryContext} defines the options for constructing * a unit of query context for this context type * diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java b/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java index 8a6cba837d28a..225d212d6e77c 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java @@ -147,9 +147,9 @@ public TermStats internalTermStats(BytesRef term) throws IOException { if (termsEnum.seekExact(term)) { return new TermStats( termsEnum.docFreq(), - /** - * We use the {@link TermsEnum#docFreq()} for fields that don't - * record the {@link TermsEnum#totalTermFreq()}. + /* + We use the {@link TermsEnum#docFreq()} for fields that don't + record the {@link TermsEnum#totalTermFreq()}. */ termsEnum.totalTermFreq() == -1 ? termsEnum.docFreq() : termsEnum.totalTermFreq() ); @@ -168,10 +168,10 @@ public CandidateSet drawCandidates(CandidateSet set) throws IOException { float origThreshold = spellchecker.getThresholdFrequency(); try { if (suggestMode != SuggestMode.SUGGEST_ALWAYS) { - /** - * We use the {@link TermStats#docFreq} to compute the frequency threshold - * because that's what {@link DirectSpellChecker#suggestSimilar} expects - * when filtering terms. + /* + We use the {@link TermStats#docFreq} to compute the frequency threshold + because that's what {@link DirectSpellChecker#suggestSimilar} expects + when filtering terms. */ int threshold = thresholdTermFrequency(original.termStats.docFreq); if (threshold == Integer.MAX_VALUE) { diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/LinearInterpolation.java b/server/src/main/java/org/opensearch/search/suggest/phrase/LinearInterpolation.java index f1dba9793ba9e..bc942da738c7e 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/LinearInterpolation.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/LinearInterpolation.java @@ -70,7 +70,7 @@ public final class LinearInterpolation extends SmoothingModel { /** * Creates a linear interpolation smoothing model. - * + *
<p>
            * Note: the lambdas must sum up to one. * * @param trigramLambda diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggestionBuilder.java index e37d964cc0424..a6bfb880cf249 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -222,7 +222,7 @@ public Integer gramSize() { * misspellings in order to form a correction. This method accepts a float * value in the range [0..1) as a fraction of the actual query terms a * number {@code >=1} as an absolute number of query terms. - * + *
<p>
            * The default is set to {@code 1.0} which corresponds to that only * corrections with at most 1 misspelled term are returned. */ diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java index 8474c2351e77e..d665a4aa579ad 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java @@ -37,6 +37,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.IndexCommit; import org.opensearch.Version; +import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterStateListener; import org.opensearch.cluster.SnapshotsInProgress; @@ -74,6 +75,7 @@ import org.opensearch.transport.TransportService; import java.io.IOException; +import java.nio.file.NoSuchFileException; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -385,6 +387,12 @@ private void snapshot( if (indexShard.routingEntry().primary() == false) { throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed only on primary"); } + if (indexShard.indexSettings().isSegRepEnabled() && indexShard.isPrimaryMode() == false) { + throw new IndexShardSnapshotFailedException( + shardId, + "snapshot triggered on a new primary following failover and cannot proceed until promotion is complete" + ); + } if (indexShard.routingEntry().relocating()) { // do not snapshot when in the process of relocation of primaries so we won't get conflicts throw new IndexShardSnapshotFailedException(shardId, "cannot snapshot while relocating"); @@ -401,18 +409,32 @@ private void snapshot( try { if (remoteStoreIndexShallowCopy && indexShard.indexSettings().isRemoteStoreEnabled()) { long startTime = threadPool.relativeTimeInMillis(); + long primaryTerm = indexShard.getOperationPrimaryTerm(); // we flush first to make sure we get the latest writes snapshotted wrappedSnapshot = indexShard.acquireLastIndexCommitAndRefresh(true); - long primaryTerm = indexShard.getOperationPrimaryTerm(); - final IndexCommit snapshotIndexCommit = wrappedSnapshot.get(); + IndexCommit snapshotIndexCommit = wrappedSnapshot.get(); long commitGeneration = snapshotIndexCommit.getGeneration(); - indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + try { + indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + } catch (NoSuchFileException e) { + wrappedSnapshot.close(); + logger.warn( + "Exception while acquiring lock on primaryTerm = {} and generation = {}", + primaryTerm, + commitGeneration + ); + indexShard.flush(new FlushRequest(shardId.getIndexName()).force(true)); + wrappedSnapshot = indexShard.acquireLastIndexCommit(false); + snapshotIndexCommit = wrappedSnapshot.get(); + commitGeneration = snapshotIndexCommit.getGeneration(); + indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + } try { repository.snapshotRemoteStoreIndexShard( indexShard.store(), snapshot.getSnapshotId(), indexId, - wrappedSnapshot.get(), + snapshotIndexCommit, getShardStateId(indexShard, snapshotIndexCommit), snapshotStatus, primaryTerm, diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java 
b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 0a1ba2486cba6..b567649367b0c 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -2005,7 +2005,7 @@ private void runNextQueuedOperation(RepositoryData repositoryData, String reposi /** * Runs a cluster state update that checks whether we have outstanding snapshot deletions that can be executed and executes them. - * + *
<p>
            * TODO: optimize this to execute in a single CS update together with finalizing the latest snapshot */ private void runReadyDeletions(RepositoryData repositoryData, String repository) { @@ -3332,7 +3332,7 @@ public boolean assertAllListenersResolved() { * Every shard snapshot or clone state update can result in multiple snapshots being updated. In order to determine whether or not a * shard update has an effect we use an outer loop over all current executing snapshot operations that iterates over them in the order * they were started in and an inner loop over the list of shard update tasks. - * + *
<p>
            * If the inner loop finds that a shard update task applies to a given snapshot and either a shard-snapshot or shard-clone operation in * it then it will update the state of the snapshot entry accordingly. If that update was a noop, then the task is removed from the * iteration as it was already applied before and likely just arrived on the cluster-manager node again due to retries upstream. @@ -3342,7 +3342,7 @@ public boolean assertAllListenersResolved() { * a task in the executed tasks collection applied to a shard it was waiting for to become available, then the shard snapshot operation * will be started for that snapshot entry and the task removed from the collection of tasks that need to be applied to snapshot * entries since it can not have any further effects. - * + *
<p>
            * Package private to allow for tests. */ static final ClusterStateTaskExecutor SHARD_STATE_EXECUTOR = new ClusterStateTaskExecutor() { @@ -3632,7 +3632,7 @@ private static ShardSnapshotStatus startShardSnapshotAfterClone(ClusterState cur /** * An update to the snapshot state of a shard. - * + *
<p>
            * Package private for testing */ static final class ShardSnapshotUpdate { diff --git a/server/src/main/java/org/opensearch/tasks/Task.java b/server/src/main/java/org/opensearch/tasks/Task.java index 22555bce4545c..594fb674b82c1 100644 --- a/server/src/main/java/org/opensearch/tasks/Task.java +++ b/server/src/main/java/org/opensearch/tasks/Task.java @@ -550,11 +550,11 @@ public int incrementResourceTrackingThreads() { * This method is called when threads finish execution, and also when the task is unregistered (to mark the task's * own thread as complete). When the active thread count becomes zero, the onTaskResourceTrackingCompleted method * is called exactly once on all registered listeners. - * + *
<p>
            * Since a task is unregistered after the message is processed, it implies that the threads responsible to produce * the response must have started prior to it (i.e. startThreadResourceTracking called before unregister). * This ensures that the number of active threads doesn't drop to zero pre-maturely. - * + *
<p>
            * Rarely, some threads may even start execution after the task is unregistered. As resource stats are piggy-backed * with the response, any thread usage info captured after the task is unregistered may be irrelevant. * diff --git a/server/src/main/java/org/opensearch/tasks/TaskCancellation.java b/server/src/main/java/org/opensearch/tasks/TaskCancellation.java index 9b61b2454b53c..2d152e513f197 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskCancellation.java +++ b/server/src/main/java/org/opensearch/tasks/TaskCancellation.java @@ -18,7 +18,7 @@ /** * TaskCancellation represents a task eligible for cancellation. * It doesn't guarantee that the task will actually get cancelled or not; that decision is left to the caller. - * + *
<p>
            * It contains a list of cancellation reasons along with callbacks that are invoked when cancel() is called. * * @opensearch.internal @@ -87,7 +87,7 @@ private void runOnCancelCallbacks() { /** * Returns the sum of all cancellation scores. - * + *
<p>
            * A zero score indicates no reason to cancel the task. * A task with a higher score suggests greater possibility of recovering the node when it is cancelled. */ diff --git a/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java b/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java index 7c9e0d5ac8097..24dcab98c8870 100644 --- a/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java +++ b/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java @@ -8,13 +8,18 @@ package org.opensearch.telemetry; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; /** * Wrapper class to encapsulate tracing related settings + * + * @opensearch.experimental */ +@ExperimentalApi public class TelemetrySettings { public static final Setting TRACER_ENABLED_SETTING = Setting.boolSetting( "telemetry.tracer.enabled", @@ -23,12 +28,56 @@ public class TelemetrySettings { Setting.Property.Dynamic ); + public static final Setting TRACER_FEATURE_ENABLED_SETTING = Setting.boolSetting( + "telemetry.feature.tracer.enabled", + false, + Setting.Property.NodeScope, + Setting.Property.Final + ); + + public static final Setting METRICS_FEATURE_ENABLED_SETTING = Setting.boolSetting( + "telemetry.feature.metrics.enabled", + false, + Setting.Property.NodeScope, + Setting.Property.Final + ); + + /** + * Probability of sampler + */ + public static final Setting TRACER_SAMPLER_PROBABILITY = Setting.doubleSetting( + "telemetry.tracer.sampler.probability", + 0.01d, + 0.00d, + 1.00d, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + /** + * metrics publish interval in seconds. 
+ */ + public static final Setting METRICS_PUBLISH_INTERVAL_SETTING = Setting.timeSetting( + "telemetry.otel.metrics.publish.interval", + TimeValue.timeValueSeconds(60), + Setting.Property.NodeScope, + Setting.Property.Final + ); + private volatile boolean tracingEnabled; + private volatile double samplingProbability; + + private final boolean tracingFeatureEnabled; + private final boolean metricsFeatureEnabled; public TelemetrySettings(Settings settings, ClusterSettings clusterSettings) { this.tracingEnabled = TRACER_ENABLED_SETTING.get(settings); + this.samplingProbability = TRACER_SAMPLER_PROBABILITY.get(settings); + this.tracingFeatureEnabled = TRACER_FEATURE_ENABLED_SETTING.get(settings); + this.metricsFeatureEnabled = METRICS_FEATURE_ENABLED_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(TRACER_ENABLED_SETTING, this::setTracingEnabled); + clusterSettings.addSettingsUpdateConsumer(TRACER_SAMPLER_PROBABILITY, this::setSamplingProbability); } public void setTracingEnabled(boolean tracingEnabled) { @@ -39,4 +88,26 @@ public boolean isTracingEnabled() { return tracingEnabled; } + /** + * Set sampling ratio + * @param samplingProbability double + */ + public void setSamplingProbability(double samplingProbability) { + this.samplingProbability = samplingProbability; + } + + /** + * Get sampling ratio + */ + public double getSamplingProbability() { + return samplingProbability; + } + + public boolean isTracingFeatureEnabled() { + return tracingFeatureEnabled; + } + + public boolean isMetricsFeatureEnabled() { + return metricsFeatureEnabled; + } } diff --git a/server/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistryFactory.java b/server/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistryFactory.java new file mode 100644 index 0000000000000..c7e2229c18437 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/metrics/MetricsRegistryFactory.java @@ -0,0 +1,76 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.Telemetry; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; +import org.opensearch.telemetry.tracing.Tracer; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Optional; + +/** + * {@link MetricsRegistryFactory} represents a single global class that is used to access {@link MetricsRegistry}s. + *
<p>
            + * The {@link MetricsRegistry} singleton object can be retrieved using MetricsRegistryFactory::getMetricsRegistry. The {@link MetricsRegistryFactory} object + * is created during class initialization and cannot subsequently be changed. + * + * @opensearch.internal + */ +@InternalApi +public class MetricsRegistryFactory implements Closeable { + + private static final Logger logger = LogManager.getLogger(MetricsRegistryFactory.class); + + private final TelemetrySettings telemetrySettings; + private final MetricsRegistry metricsRegistry; + + public MetricsRegistryFactory(TelemetrySettings telemetrySettings, Optional telemetry) { + this.telemetrySettings = telemetrySettings; + this.metricsRegistry = metricsRegistry(telemetry); + } + + /** + * Returns the {@link MetricsRegistry} instance + * + * @return MetricsRegistry instance + */ + public MetricsRegistry getMetricsRegistry() { + return metricsRegistry; + } + + /** + * Closes the {@link Tracer} + */ + @Override + public void close() { + try { + metricsRegistry.close(); + } catch (IOException e) { + logger.warn("Error closing MetricsRegistry", e); + } + } + + private MetricsRegistry metricsRegistry(Optional telemetry) { + MetricsRegistry metricsRegistry = telemetry.map(Telemetry::getMetricsTelemetry) + .map(metricsTelemetry -> createDefaultMetricsRegistry(metricsTelemetry)) + .orElse(NoopMetricsRegistry.INSTANCE); + return metricsRegistry; + } + + private MetricsRegistry createDefaultMetricsRegistry(MetricsTelemetry metricsTelemetry) { + return new DefaultMetricsRegistry(metricsTelemetry); + } + +} diff --git a/server/src/main/java/org/opensearch/telemetry/metrics/NoopMetricsRegistryFactory.java b/server/src/main/java/org/opensearch/telemetry/metrics/NoopMetricsRegistryFactory.java new file mode 100644 index 0000000000000..5137cb18e2cc0 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/metrics/NoopMetricsRegistryFactory.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; + +import java.util.Optional; + +/** + * No-op implementation of {@link MetricsRegistryFactory} + * + * @opensearch.internal + */ +@InternalApi +public class NoopMetricsRegistryFactory extends MetricsRegistryFactory { + public NoopMetricsRegistryFactory() { + super(null, Optional.empty()); + } + + @Override + public MetricsRegistry getMetricsRegistry() { + return NoopMetricsRegistry.INSTANCE; + } + + @Override + public void close() { + + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/metrics/package-info.java b/server/src/main/java/org/opensearch/telemetry/metrics/package-info.java new file mode 100644 index 0000000000000..ad4564e1d7773 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/metrics/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for telemetry. 
+ */ +package org.opensearch.telemetry.metrics; diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java b/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java new file mode 100644 index 0000000000000..b6b2cf360d1c5 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/AttributeNames.java @@ -0,0 +1,97 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Holds the attribute names, to avoid duplication and keep naming consistent. + * + * @opensearch.experimental + */ +@ExperimentalApi +public final class AttributeNames { + + /** + * Constructor + */ + private AttributeNames() { + + } + + /** + * HTTP Protocol Version + */ + public static final String HTTP_PROTOCOL_VERSION = "http.version"; + + /** + * HTTP method + */ + public static final String HTTP_METHOD = "http.method"; + + /** + * HTTP Request URI. + */ + public static final String HTTP_URI = "http.uri"; + + /** + * Rest Request ID. + */ + public static final String REST_REQ_ID = "rest.request_id"; + + /** + * Rest Request Raw Path. + */ + public static final String REST_REQ_RAW_PATH = "rest.raw_path"; + + /** + * Trace key. To be used for on demand sampling. + */ + public static final String TRACE = "trace"; + + /** + * Transport Service send request target host. + */ + public static final String TRANSPORT_TARGET_HOST = "target_host"; + + /** + * Transport Service send request local host. + */ + public static final String TRANSPORT_HOST = "host"; + + /** + * Action Name.
+ */ + public static final String TRANSPORT_ACTION = "action"; + + /** + * Index Name + */ + public static final String INDEX = "index"; + + /** + * Shard ID + */ + public static final String SHARD_ID = "shard_id"; + + /** + * Number of request items in bulk request + */ + public static final String BULK_REQUEST_ITEMS = "bulk_request_items"; + + /** + * Node ID + */ + public static final String NODE_ID = "node_id"; + + /** + * Refresh Policy + */ + public static final String REFRESH_POLICY = "refresh_policy"; +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/NoopTracerFactory.java b/server/src/main/java/org/opensearch/telemetry/tracing/NoopTracerFactory.java index f82a390dc1754..87762f342a653 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/NoopTracerFactory.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/NoopTracerFactory.java @@ -8,6 +8,7 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.telemetry.tracing.noop.NoopTracer; import java.util.Optional; @@ -17,6 +18,7 @@ * * @opensearch.internal */ +@InternalApi public class NoopTracerFactory extends TracerFactory { public NoopTracerFactory() { super(null, Optional.empty(), null); diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java new file mode 100644 index 0000000000000..1dce422943b7a --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/SpanBuilder.java @@ -0,0 +1,173 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.action.bulk.BulkShardRequest; +import org.opensearch.action.support.replication.ReplicatedWriteRequest; +import org.opensearch.common.annotation.InternalApi; +import org.opensearch.core.common.Strings; +import org.opensearch.http.HttpRequest; +import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.attributes.Attributes; +import org.opensearch.transport.TcpChannel; +import org.opensearch.transport.Transport; + +import java.util.Arrays; +import java.util.List; + +/** + * Utility class, helps in creating the {@link SpanCreationContext} for span. + * + * @opensearch.internal + */ +@InternalApi +public final class SpanBuilder { + + private static final List HEADERS_TO_BE_ADDED_AS_ATTRIBUTES = Arrays.asList(AttributeNames.TRACE); + /** + * Attribute name Separator + */ + private static final String SEPARATOR = " "; + + /** + * Constructor + */ + private SpanBuilder() { + + } + + /** + * Creates {@link SpanCreationContext} from the {@link HttpRequest} + * @param request Http request. + * @return context. + */ + public static SpanCreationContext from(HttpRequest request) { + return SpanCreationContext.server().name(createSpanName(request)).attributes(buildSpanAttributes(request)); + } + + /** + * Creates {@link SpanCreationContext} from the {@link RestRequest} + * @param request Rest request + * @return context + */ + public static SpanCreationContext from(RestRequest request) { + return SpanCreationContext.client().name(createSpanName(request)).attributes(buildSpanAttributes(request)); + } + + /** + * Creates {@link SpanCreationContext} from Transport action and connection details. + * @param action action. 
+ * @param connection transport connection. + * @return context + */ + public static SpanCreationContext from(String action, Transport.Connection connection) { + return SpanCreationContext.server().name(createSpanName(action, connection)).attributes(buildSpanAttributes(action, connection)); + } + + public static SpanCreationContext from(String spanName, String nodeId, ReplicatedWriteRequest request) { + return SpanCreationContext.server().name(spanName).attributes(buildSpanAttributes(nodeId, request)); + } + + private static String createSpanName(HttpRequest httpRequest) { + return httpRequest.method().name() + SEPARATOR + httpRequest.uri(); + } + + private static Attributes buildSpanAttributes(HttpRequest httpRequest) { + Attributes attributes = Attributes.create() + .addAttribute(AttributeNames.HTTP_URI, httpRequest.uri()) + .addAttribute(AttributeNames.HTTP_METHOD, httpRequest.method().name()) + .addAttribute(AttributeNames.HTTP_PROTOCOL_VERSION, httpRequest.protocolVersion().name()); + populateHeader(httpRequest, attributes); + return attributes; + } + + private static void populateHeader(HttpRequest httpRequest, Attributes attributes) { + HEADERS_TO_BE_ADDED_AS_ATTRIBUTES.forEach(x -> { + if (httpRequest.getHeaders() != null + && httpRequest.getHeaders().get(x) != null + && (httpRequest.getHeaders().get(x).isEmpty() == false)) { + attributes.addAttribute(x, Strings.collectionToCommaDelimitedString(httpRequest.getHeaders().get(x))); + } + }); + } + + private static String createSpanName(RestRequest restRequest) { + String spanName = "rest_request"; + if (restRequest != null) { + try { + String methodName = restRequest.method().name(); + // path() does the decoding, which may give error + String path = restRequest.path(); + spanName = methodName + SEPARATOR + path; + } catch (Exception e) { + // swallow the exception and keep the default name. + } + } + return spanName; + } + + private static Attributes buildSpanAttributes(RestRequest restRequest) { + if (restRequest != null) { + return Attributes.create() + .addAttribute(AttributeNames.REST_REQ_ID, restRequest.getRequestId()) + .addAttribute(AttributeNames.REST_REQ_RAW_PATH, restRequest.rawPath()); + } else { + return Attributes.EMPTY; + } + } + + private static String createSpanName(String action, Transport.Connection connection) { + return action + SEPARATOR + (connection.getNode() != null ? connection.getNode().getHostAddress() : null); + } + + private static Attributes buildSpanAttributes(String action, Transport.Connection connection) { + Attributes attributes = Attributes.create().addAttribute(AttributeNames.TRANSPORT_ACTION, action); + if (connection != null && connection.getNode() != null) { + attributes.addAttribute(AttributeNames.TRANSPORT_TARGET_HOST, connection.getNode().getHostAddress()); + } + return attributes; + } + + /** + * Creates {@link SpanCreationContext} from Inbound Handler. + * @param action action. + * @param tcpChannel tcp channel. + * @return context + */ + public static SpanCreationContext from(String action, TcpChannel tcpChannel) { + return SpanCreationContext.server().name(createSpanName(action, tcpChannel)).attributes(buildSpanAttributes(action, tcpChannel)); + } + + private static String createSpanName(String action, TcpChannel tcpChannel) { + return action + SEPARATOR + (tcpChannel.getRemoteAddress() != null + ? 
tcpChannel.getRemoteAddress().getHostString() + : tcpChannel.getLocalAddress().getHostString()); + } + + private static Attributes buildSpanAttributes(String action, TcpChannel tcpChannel) { + Attributes attributes = Attributes.create().addAttribute(AttributeNames.TRANSPORT_ACTION, action); + attributes.addAttribute(AttributeNames.TRANSPORT_HOST, tcpChannel.getLocalAddress().getHostString()); + return attributes; + } + + private static Attributes buildSpanAttributes(String nodeId, ReplicatedWriteRequest request) { + Attributes attributes = Attributes.create() + .addAttribute(AttributeNames.NODE_ID, nodeId) + .addAttribute(AttributeNames.REFRESH_POLICY, request.getRefreshPolicy().getValue()); + if (request.shardId() != null) { + attributes.addAttribute(AttributeNames.INDEX, request.shardId().getIndexName()) + .addAttribute(AttributeNames.SHARD_ID, request.shardId().getId()); + } + if (request instanceof BulkShardRequest) { + attributes.addAttribute(AttributeNames.BULK_REQUEST_ITEMS, ((BulkShardRequest) request).items().length); + } + return attributes; + } + +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java b/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java index c009ab2391aab..208df90f65d74 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java @@ -8,6 +8,7 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.concurrent.ThreadContextStatePropagator; @@ -21,6 +22,7 @@ * * @opensearch.internal */ +@InternalApi public class ThreadContextBasedTracerContextStorage implements TracerContextStorage, ThreadContextStatePropagator { private final ThreadContext threadContext; @@ -40,9 +42,6 @@ public Span get(String key) { @Override public void put(String key, Span span) { - if (span == null) { - return; - } SpanReference currentSpanRef = threadContext.getTransient(key); if (currentSpanRef == null) { threadContext.putTransient(key, new SpanReference(span)); @@ -71,7 +70,7 @@ public Map headers(Map source) { if (source.containsKey(CURRENT_SPAN)) { final SpanReference current = (SpanReference) source.get(CURRENT_SPAN); - if (current != null) { + if (current != null && current.getSpan() != null) { tracingTelemetry.getContextPropagator().inject(current.getSpan(), (key, value) -> headers.put(key, value)); } } @@ -90,6 +89,7 @@ private Optional spanFromThreadContext(String key) { } private Span spanFromHeader() { - return tracingTelemetry.getContextPropagator().extract(threadContext.getHeaders()); + Optional span = tracingTelemetry.getContextPropagator().extract(threadContext.getHeaders()); + return span.orElse(null); } } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/TracerFactory.java b/server/src/main/java/org/opensearch/telemetry/tracing/TracerFactory.java index d8fe812c82f53..1cb73e0247c3a 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/TracerFactory.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/TracerFactory.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.common.util.concurrent.ThreadContext; import 
org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.TelemetrySettings; @@ -24,7 +25,10 @@ *

            * The Tracer singleton object can be retrieved using tracerManager.getTracer(). The TracerManager object * is created during class initialization and cannot subsequently be changed. + * + * @opensearch.internal */ +@InternalApi public class TracerFactory implements Closeable { private static final Logger logger = LogManager.getLogger(TracerFactory.class); diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java b/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java index 466abaac435f3..631fb8242d78e 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/WrappedTracer.java @@ -8,17 +8,20 @@ package org.opensearch.telemetry.tracing; +import org.opensearch.common.annotation.InternalApi; import org.opensearch.telemetry.TelemetrySettings; -import org.opensearch.telemetry.tracing.attributes.Attributes; import org.opensearch.telemetry.tracing.noop.NoopTracer; import java.io.IOException; +import java.util.List; +import java.util.Map; /** * Wrapper implementation of Tracer. This delegates call to right tracer based on the tracer settings * * @opensearch.internal */ +@InternalApi final class WrappedTracer implements Tracer { private final Tracer defaultTracer; @@ -36,13 +39,8 @@ public WrappedTracer(TelemetrySettings telemetrySettings, Tracer defaultTracer) } @Override - public SpanScope startSpan(String spanName) { - return startSpan(spanName, Attributes.EMPTY); - } - - @Override - public SpanScope startSpan(String spanName, Attributes attributes) { - return startSpan(spanName, null, attributes); + public Span startSpan(SpanCreationContext context) { + return getDelegateTracer().startSpan(context); } @Override @@ -52,9 +50,18 @@ public SpanContext getCurrentSpan() { } @Override - public SpanScope startSpan(String spanName, SpanContext parentSpan, Attributes attributes) { - Tracer delegateTracer = getDelegateTracer(); - return delegateTracer.startSpan(spanName, parentSpan, attributes); + public ScopedSpan startScopedSpan(SpanCreationContext spanCreationContext) { + return getDelegateTracer().startScopedSpan(spanCreationContext); + } + + @Override + public SpanScope withSpanInScope(Span span) { + return getDelegateTracer().withSpanInScope(span); + } + + @Override + public boolean isRecording() { + return getDelegateTracer().isRecording(); } @Override @@ -66,4 +73,9 @@ public void close() throws IOException { Tracer getDelegateTracer() { return telemetrySettings.isTracingEnabled() ? defaultTracer : NoopTracer.INSTANCE; } + + @Override + public Span startSpan(SpanCreationContext spanCreationContext, Map> headers) { + return defaultTracer.startSpan(spanCreationContext, headers); + } } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java new file mode 100644 index 0000000000000..e0fb690bd29be --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableHttpChannel.java @@ -0,0 +1,100 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing.channels; + +import org.opensearch.core.action.ActionListener; +import org.opensearch.http.HttpChannel; +import org.opensearch.http.HttpResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.listener.TraceableActionListener; + +import java.net.InetSocketAddress; +import java.util.Objects; +import java.util.Optional; + +/** + * Tracer wrapped {@link HttpChannel} + */ +public class TraceableHttpChannel implements HttpChannel { + private final HttpChannel delegate; + private final Span span; + private final Tracer tracer; + + /** + * Constructor. + * + * @param delegate delegate + * @param span span + * @param tracer tracer + */ + private TraceableHttpChannel(HttpChannel delegate, Span span, Tracer tracer) { + this.span = Objects.requireNonNull(span); + this.delegate = Objects.requireNonNull(delegate); + this.tracer = Objects.requireNonNull(tracer); + } + + /** + * Factory method. + * + * @param delegate delegate + * @param span span + * @param tracer tracer + * @return http channel + */ + public static HttpChannel create(HttpChannel delegate, Span span, Tracer tracer) { + if (tracer.isRecording() == true) { + return new TraceableHttpChannel(delegate, span, tracer); + } else { + return delegate; + } + } + + @Override + public void handleException(Exception ex) { + span.addEvent("The HttpChannel was closed without sending the response"); + span.setError(ex); + span.endSpan(); + } + + @Override + public void close() { + delegate.close(); + } + + @Override + public void addCloseListener(ActionListener listener) { + delegate.addCloseListener(listener); + } + + @Override + public boolean isOpen() { + return delegate.isOpen(); + } + + @Override + public void sendResponse(HttpResponse response, ActionListener listener) { + delegate.sendResponse(response, TraceableActionListener.create(listener, span, tracer)); + } + + @Override + public InetSocketAddress getLocalAddress() { + return delegate.getLocalAddress(); + } + + @Override + public InetSocketAddress getRemoteAddress() { + return delegate.getRemoteAddress(); + } + + @Override + public Optional get(String name, Class clazz) { + return delegate.get(name, clazz); + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java new file mode 100644 index 0000000000000..32769dd1d848d --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableRestChannel.java @@ -0,0 +1,105 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing.channels; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; + +import java.io.IOException; +import java.util.Objects; + +/** + * Tracer wrapped {@link RestChannel} + */ +public class TraceableRestChannel implements RestChannel { + + private final RestChannel delegate; + private final Span span; + + private final Tracer tracer; + + /** + * Constructor. + * + * @param delegate delegate + * @param span span + * @param tracer tracer + */ + private TraceableRestChannel(RestChannel delegate, Span span, Tracer tracer) { + this.span = Objects.requireNonNull(span); + this.delegate = Objects.requireNonNull(delegate); + this.tracer = Objects.requireNonNull(tracer); + } + + /** + * Factory method. + * @param delegate delegate + * @param span span + * @param tracer tracer + * @return rest channel + */ + public static RestChannel create(RestChannel delegate, Span span, Tracer tracer) { + if (tracer.isRecording() == true) { + return new TraceableRestChannel(delegate, span, tracer); + } else { + return delegate; + } + } + + @Override + public XContentBuilder newBuilder() throws IOException { + return delegate.newBuilder(); + } + + @Override + public XContentBuilder newErrorBuilder() throws IOException { + return delegate.newErrorBuilder(); + } + + @Override + public XContentBuilder newBuilder(MediaType mediaType, boolean useFiltering) throws IOException { + return delegate.newBuilder(mediaType, useFiltering); + } + + @Override + public XContentBuilder newBuilder(MediaType mediaType, MediaType responseContentType, boolean useFiltering) throws IOException { + return delegate.newBuilder(mediaType, responseContentType, useFiltering); + } + + @Override + public BytesStreamOutput bytesOutput() { + return delegate.bytesOutput(); + } + + @Override + public RestRequest request() { + return delegate.request(); + } + + @Override + public boolean detailedErrorsEnabled() { + return delegate.detailedErrorsEnabled(); + } + + @Override + public void sendResponse(RestResponse response) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.sendResponse(response); + } finally { + span.endSpan(); + } + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java new file mode 100644 index 0000000000000..45268b4807cd9 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/TraceableTcpTransportChannel.java @@ -0,0 +1,117 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing.channels; + +import org.opensearch.Version; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.transport.BaseTcpTransportChannel; +import org.opensearch.transport.TcpTransportChannel; +import org.opensearch.transport.TransportChannel; + +import java.io.IOException; +import java.util.Optional; + +/** + * Tracer wrapped {@link TransportChannel} + */ +public class TraceableTcpTransportChannel extends BaseTcpTransportChannel { + + private final TransportChannel delegate; + private final Span span; + private final Tracer tracer; + + /** + * Constructor. + * @param delegate delegate + * @param span span + * @param tracer tracer + */ + public TraceableTcpTransportChannel(TcpTransportChannel delegate, Span span, Tracer tracer) { + super(delegate.getChannel()); + this.delegate = delegate; + this.span = span; + this.tracer = tracer; + } + + /** + * Factory method. + * + * @param delegate delegate + * @param span span + * @param tracer tracer + * @return transport channel + */ + public static TransportChannel create(TcpTransportChannel delegate, final Span span, final Tracer tracer) { + if (tracer.isRecording() == true) { + delegate.getChannel().addCloseListener(new ActionListener() { + @Override + public void onResponse(Void unused) { + onFailure(null); + } + + @Override + public void onFailure(Exception e) { + span.addEvent("The TransportChannel was closed without sending the response"); + span.setError(e); + span.endSpan(); + } + }); + + return new TraceableTcpTransportChannel(delegate, span, tracer); + } else { + return delegate; + } + } + + @Override + public String getProfileName() { + return delegate.getProfileName(); + } + + @Override + public String getChannelType() { + return delegate.getChannelType(); + } + + @Override + public void sendResponse(TransportResponse response) throws IOException { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.sendResponse(response); + } catch (final IOException ex) { + span.setError(ex); + throw ex; + } finally { + span.endSpan(); + } + } + + @Override + public void sendResponse(Exception exception) throws IOException { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.sendResponse(exception); + } finally { + span.setError(exception); + span.endSpan(); + } + } + + @Override + public Version getVersion() { + return delegate.getVersion(); + } + + @Override + public Optional get(String name, Class clazz) { + return delegate.get(name, clazz); + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/channels/package-info.java b/server/src/main/java/org/opensearch/telemetry/tracing/channels/package-info.java new file mode 100644 index 0000000000000..ee4b675d5dc30 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/channels/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for tracing requests. 
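All three channel wrappers above share one guard in their create(...) factories: wrap the delegate only while the tracer is actually recording, otherwise hand back the bare delegate so a disabled tracer adds zero overhead. The shared shape, reduced to a sketch (the generic helper below is illustrative, not part of this change):

import java.util.function.UnaryOperator;

import org.opensearch.telemetry.tracing.Tracer;

final class WrapIfRecording {
    // The same decision TraceableHttpChannel, TraceableRestChannel and
    // TraceableTcpTransportChannel make in their create(...) methods.
    static <T> T apply(T delegate, UnaryOperator<T> wrapper, Tracer tracer) {
        return tracer.isRecording() ? wrapper.apply(delegate) : delegate;
    }
}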
+ */ +package org.opensearch.telemetry.tracing.channels; diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java b/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java new file mode 100644 index 0000000000000..eb9d53d2df51b --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/handler/TraceableTransportResponseHandler.java @@ -0,0 +1,107 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.handler; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponseHandler; + +import java.io.IOException; +import java.util.Objects; + +/** + * Tracer wrapped {@link TransportResponseHandler} + * @param TransportResponse + */ +public class TraceableTransportResponseHandler implements TransportResponseHandler { + + private final Span span; + private final TransportResponseHandler delegate; + private final Tracer tracer; + + /** + * Constructor. + * + * @param delegate delegate + * @param span span + * @param tracer tracer + */ + private TraceableTransportResponseHandler(TransportResponseHandler delegate, Span span, Tracer tracer) { + this.delegate = Objects.requireNonNull(delegate); + this.span = Objects.requireNonNull(span); + this.tracer = Objects.requireNonNull(tracer); + } + + /** + * Factory method. 
+ * @param delegate delegate + * @param span span + * @param tracer tracer + * @return transportResponseHandler + */ + public static TransportResponseHandler create( + TransportResponseHandler delegate, + Span span, + Tracer tracer + ) { + if (tracer.isRecording() == true) { + return new TraceableTransportResponseHandler(delegate, span, tracer); + } else { + return delegate; + } + } + + @Override + public T read(StreamInput in) throws IOException { + return delegate.read(in); + } + + @Override + public void handleResponse(T response) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.handleResponse(response); + } finally { + span.endSpan(); + } + } + + @Override + public void handleException(TransportException exp) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.handleException(exp); + } finally { + span.setError(exp); + span.endSpan(); + } + } + + @Override + public String executor() { + return delegate.executor(); + } + + @Override + public String toString() { + return delegate.toString(); + } + + @Override + public void handleRejection(Exception exp) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.handleRejection(exp); + } finally { + span.setError(exp); + span.endSpan(); + } + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/handler/package-info.java b/server/src/main/java/org/opensearch/telemetry/tracing/handler/package-info.java new file mode 100644 index 0000000000000..ff9f8f57dc07c --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/handler/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for tracing requests. + */ +package org.opensearch.telemetry.tracing.handler; diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java new file mode 100644 index 0000000000000..0cb4ce71d05f8 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/listener/TraceableActionListener.java @@ -0,0 +1,76 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing.listener; + +import org.opensearch.core.action.ActionListener; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; + +import java.util.Objects; + +/** + * Tracer wrapped {@link ActionListener} + * @param response. + */ +public class TraceableActionListener implements ActionListener { + + private final ActionListener delegate; + private final Span span; + private final Tracer tracer; + + /** + * Constructor. + * + * @param delegate delegate + * @param span span + * @param tracer tracer + */ + private TraceableActionListener(ActionListener delegate, Span span, Tracer tracer) { + this.delegate = Objects.requireNonNull(delegate); + this.span = Objects.requireNonNull(span); + this.tracer = Objects.requireNonNull(tracer); + } + + /** + * Factory method. 
+ * @param delegate delegate + * @param span span + * @param tracer tracer + * @return action listener + */ + public static ActionListener create(ActionListener delegate, Span span, Tracer tracer) { + if (tracer.isRecording() == true) { + return new TraceableActionListener(delegate, span, tracer); + } else { + return delegate; + } + } + + @Override + public void onResponse(Response response) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.onResponse(response); + } finally { + span.endSpan(); + } + + } + + @Override + public void onFailure(Exception e) { + try (SpanScope scope = tracer.withSpanInScope(span)) { + delegate.onFailure(e); + } finally { + span.setError(e); + span.endSpan(); + } + + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/listener/package-info.java b/server/src/main/java/org/opensearch/telemetry/tracing/listener/package-info.java new file mode 100644 index 0000000000000..5dcb570c2bb2e --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/listener/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for tracing requests. + */ +package org.opensearch.telemetry.tracing.listener; diff --git a/server/src/main/java/org/opensearch/threadpool/Scheduler.java b/server/src/main/java/org/opensearch/threadpool/Scheduler.java index 86c322ec89dd7..4a7c63a1b559a 100644 --- a/server/src/main/java/org/opensearch/threadpool/Scheduler.java +++ b/server/src/main/java/org/opensearch/threadpool/Scheduler.java @@ -60,7 +60,7 @@ public interface Scheduler { /** * Create a scheduler that can be used client side. Server side, please use ThreadPool.schedule instead. - * + *
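A short usage sketch for the TraceableActionListener wrapper above (span acquisition elided; the response type parameter is illustrative). Whichever path the callback takes, the span is ended exactly once:

import org.opensearch.core.action.ActionListener;
import org.opensearch.telemetry.tracing.Span;
import org.opensearch.telemetry.tracing.Tracer;
import org.opensearch.telemetry.tracing.listener.TraceableActionListener;

final class ListenerTracingSketch {
    static <R> ActionListener<R> traced(ActionListener<R> inner, Span span, Tracer tracer) {
        // Returns 'inner' unchanged when the tracer is not recording.
        return TraceableActionListener.create(inner, span, tracer);
    }
}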

* Notice that if any scheduled jobs fail with an exception, these will bubble up to the uncaught exception handler where they will * be logged as a warning. This includes jobs started using execute, submit and schedule. * @param settings the settings to use @@ -178,7 +178,7 @@ interface ScheduledCancellable extends Delayed, Cancellable {} * This class encapsulates the scheduling of a {@link Runnable} that needs to be repeated on an interval. For example, checking a value * for cleanup every second could be done by passing in a Runnable that can perform the check and the specified interval between * executions of this runnable. NOTE: the runnable is only rescheduled to run again after completion of the runnable. - * + *

            * For this class, completion means that the call to {@link Runnable#run()} returned or an exception was thrown and caught. In * case of an exception, this class will log the exception and reschedule the runnable for its next execution. This differs from the * {@link ScheduledThreadPoolExecutor#scheduleWithFixedDelay(Runnable, long, long, TimeUnit)} semantics as an exception there would diff --git a/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java b/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java index 183b9b2f4cf9a..90f50f78d84ad 100644 --- a/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java +++ b/server/src/main/java/org/opensearch/threadpool/TaskAwareRunnable.java @@ -25,7 +25,7 @@ /** * Responsible for wrapping the original task's runnable and sending updates on when it starts and finishes to * entities listening to the events. - * + *

* It is able to associate the runnable with a task with the help of the task Id available in the thread context. */ public class TaskAwareRunnable extends AbstractRunnable implements WrappedRunnable { diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index bcfe4be750cdf..9741fc9f2f892 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.SizeValue; @@ -77,8 +78,9 @@ /** * The OpenSearch threadpool class * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ThreadPool implements ReportingService<ThreadPoolInfo>, Scheduler { private static final Logger logger = LogManager.getLogger(ThreadPool.class); @@ -112,6 +114,7 @@ public static class Names { public static final String TRANSLOG_SYNC = "translog_sync"; public static final String REMOTE_PURGE = "remote_purge"; public static final String REMOTE_REFRESH_RETRY = "remote_refresh_retry"; + public static final String REMOTE_RECOVERY = "remote_recovery"; public static final String INDEX_SEARCHER = "index_searcher"; } @@ -181,6 +184,7 @@ public static ThreadPoolType fromType(String type) { map.put(Names.TRANSLOG_SYNC, ThreadPoolType.FIXED); map.put(Names.REMOTE_PURGE, ThreadPoolType.SCALING); map.put(Names.REMOTE_REFRESH_RETRY, ThreadPoolType.SCALING); + map.put(Names.REMOTE_RECOVERY, ThreadPoolType.SCALING); if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { map.put(Names.INDEX_SEARCHER, ThreadPoolType.FIXED_AUTO_QUEUE_SIZE); } @@ -278,6 +282,15 @@ public ThreadPool( Names.REMOTE_REFRESH_RETRY, new ScalingExecutorBuilder(Names.REMOTE_REFRESH_RETRY, 1, halfProcMaxAt10, TimeValue.timeValueMinutes(5)) ); + builders.put( + Names.REMOTE_RECOVERY, + new ScalingExecutorBuilder( + Names.REMOTE_RECOVERY, + 1, + twiceAllocatedProcessors(allocatedProcessors), + TimeValue.timeValueMinutes(5) + ) + ); if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) { builders.put( Names.INDEX_SEARCHER, @@ -332,7 +345,7 @@ public ThreadPool( /** * Returns a value of milliseconds that may be used for relative time calculations. - * + *
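The new remote_recovery pool registered above is a scaling executor: core size 1, max size twice the allocated processors, five-minute keep-alive. Submitting work to it looks like any other named pool; the payload below is illustrative.

import org.opensearch.threadpool.ThreadPool;

final class RemoteRecoverySketch {
    static void submit(ThreadPool threadPool) {
        threadPool.executor(ThreadPool.Names.REMOTE_RECOVERY).execute(() -> {
            // Illustrative payload, e.g. fetching a segment file from the remote store.
        });
    }
}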

            * This method should only be used for calculating time deltas. For an epoch based * timestamp, see {@link #absoluteTimeInMillis()}. */ @@ -342,7 +355,7 @@ public long relativeTimeInMillis() { /** * Returns a value of nanoseconds that may be used for relative time calculations. - * + *
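The distinction these javadocs keep repeating is worth one concrete example: relative time is monotonic and only meaningful as a difference, while absolute time is epoch-based and only meaningful as a timestamp. A sketch (the work parameter is illustrative):

import org.opensearch.threadpool.ThreadPool;

final class TimeSourceSketch {
    static long measureNanos(ThreadPool threadPool, Runnable work) {
        long startNanos = threadPool.relativeTimeInNanos(); // monotonic; deltas only
        work.run();
        return threadPool.relativeTimeInNanos() - startNanos;
        // For a wall-clock timestamp (logging, date formatting) use
        // threadPool.absoluteTimeInMillis() instead; deltas taken from it can be negative.
    }
}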

            * This method should only be used for calculating time deltas. For an epoch based * timestamp, see {@link #absoluteTimeInMillis()}. */ @@ -355,7 +368,7 @@ public long relativeTimeInNanos() { * that require the highest precision possible. Performance critical code must use * either {@link #relativeTimeInNanos()} or {@link #relativeTimeInMillis()} which * give better performance at the cost of lower precision. - * + *

            * This method should only be used for calculating time deltas. For an epoch based * timestamp, see {@link #absoluteTimeInMillis()}. */ @@ -365,7 +378,7 @@ public long preciseRelativeTimeInNanos() { /** * Returns the value of milliseconds since UNIX epoch. - * + *

            * This method should only be used for exact date/time formatting. For calculating * time deltas that should not suffer from negative deltas, which are possible with * this method, see {@link #relativeTimeInMillis()}. @@ -401,19 +414,21 @@ public ThreadPoolStats stats() { long rejected = -1; int largest = -1; long completed = -1; - if (holder.executor() instanceof ThreadPoolExecutor) { - ThreadPoolExecutor threadPoolExecutor = (ThreadPoolExecutor) holder.executor(); + long waitTimeNanos = -1; + if (holder.executor() instanceof OpenSearchThreadPoolExecutor) { + OpenSearchThreadPoolExecutor threadPoolExecutor = (OpenSearchThreadPoolExecutor) holder.executor(); threads = threadPoolExecutor.getPoolSize(); queue = threadPoolExecutor.getQueue().size(); active = threadPoolExecutor.getActiveCount(); largest = threadPoolExecutor.getLargestPoolSize(); completed = threadPoolExecutor.getCompletedTaskCount(); + waitTimeNanos = threadPoolExecutor.getPoolWaitTimeNanos(); RejectedExecutionHandler rejectedExecutionHandler = threadPoolExecutor.getRejectedExecutionHandler(); if (rejectedExecutionHandler instanceof XRejectedExecutionHandler) { rejected = ((XRejectedExecutionHandler) rejectedExecutionHandler).rejected(); } } - stats.add(new ThreadPoolStats.Stats(name, threads, queue, active, rejected, largest, completed)); + stats.add(new ThreadPoolStats.Stats(name, threads, queue, active, rejected, largest, completed, waitTimeNanos)); } return new ThreadPoolStats(stats); } @@ -659,7 +674,7 @@ public String toString() { /** * A thread to cache millisecond time values from * {@link System#nanoTime()} and {@link System#currentTimeMillis()}. - * + *

            * The values are updated at a specified interval. */ static class CachedTimeThread extends Thread { diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPoolStats.java b/server/src/main/java/org/opensearch/threadpool/ThreadPoolStats.java index b4d7e4a3fbf7a..7b4c1504d927a 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPoolStats.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPoolStats.java @@ -32,6 +32,8 @@ package org.opensearch.threadpool; +import org.opensearch.Version; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -65,8 +67,9 @@ public static class Stats implements Writeable, ToXContentFragment, Comparable * {@link CompressibleBytesOutputStream#materializeBytes()} should be called when all the bytes have been * written to this stream. If compression is enabled, the proper EOS bytes will be written at that point. * The underlying {@link BytesReference} will be returned. - * + *

            * {@link CompressibleBytesOutputStream#close()} will NOT close the underlying stream. The byte stream passed * in the constructor must be closed individually. * diff --git a/server/src/main/java/org/opensearch/transport/InboundHandler.java b/server/src/main/java/org/opensearch/transport/InboundHandler.java index 9f9232c18079a..c14a53e799319 100644 --- a/server/src/main/java/org/opensearch/transport/InboundHandler.java +++ b/server/src/main/java/org/opensearch/transport/InboundHandler.java @@ -46,6 +46,11 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.channels.TraceableTcpTransportChannel; import org.opensearch.threadpool.ThreadPool; import java.io.EOFException; @@ -74,6 +79,8 @@ public class InboundHandler { private volatile long slowLogThresholdMs = Long.MAX_VALUE; + private final Tracer tracer; + InboundHandler( ThreadPool threadPool, OutboundHandler outboundHandler, @@ -81,7 +88,8 @@ public class InboundHandler { TransportHandshaker handshaker, TransportKeepAlive keepAlive, Transport.RequestHandlers requestHandlers, - Transport.ResponseHandlers responseHandlers + Transport.ResponseHandlers responseHandlers, + Tracer tracer ) { this.threadPool = threadPool; this.outboundHandler = outboundHandler; @@ -90,6 +98,7 @@ public class InboundHandler { this.keepAlive = keepAlive; this.requestHandlers = requestHandlers; this.responseHandlers = responseHandlers; + this.tracer = tracer; } void setMessageListener(TransportMessageListener listener) { @@ -108,7 +117,6 @@ void inboundMessage(TcpChannel channel, InboundMessage message) throws Exception final long startTime = threadPool.relativeTimeInMillis(); channel.getChannelStats().markAccessed(startTime); TransportLogger.logInboundMessage(channel, message); - if (message.isPing()) { keepAlive.receiveKeepAlive(channel); } else { @@ -123,7 +131,6 @@ private void messageReceived(TcpChannel channel, InboundMessage message, long st final InetSocketAddress remoteAddress = channel.getRemoteAddress(); final Header header = message.getHeader(); assert header.needsToReadVariableHeader() == false; - ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext existing = threadContext.stashContext()) { // Place the context with the headers from the message @@ -165,6 +172,7 @@ private void messageReceived(TcpChannel channel, InboundMessage message, long st handleResponse(requestId, remoteAddress, EMPTY_STREAM_INPUT, handler); } } + } } finally { final long took = threadPool.relativeTimeInMillis() - startTime; @@ -184,80 +192,89 @@ private void handleRequest(TcpChannel channel, Head final String action = header.getActionName(); final long requestId = header.getRequestId(); final Version version = header.getVersion(); - if (header.isHandshake()) { - messageListener.onRequestReceived(requestId, action); - // Cannot short circuit handshakes - assert message.isShortCircuit() == false; - final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); - assertRemoteVersion(stream, header.getVersion()); - final TransportChannel transportChannel = new TcpTransportChannel( - outboundHandler, - channel, - action, - requestId, - version, - 
header.getFeatures(), - header.isCompressed(), - header.isHandshake(), - message.takeBreakerReleaseControl() - ); - try { - handshaker.handleHandshake(transportChannel, requestId, stream); - } catch (Exception e) { - if (Version.CURRENT.isCompatible(header.getVersion())) { - sendErrorResponse(action, transportChannel, e); - } else { - logger.warn( - new ParameterizedMessage( - "could not send error response to handshake received on [{}] using wire format version [{}], closing channel", - channel, - header.getVersion() - ), - e - ); - channel.close(); - } - } - } else { - final TransportChannel transportChannel = new TcpTransportChannel( - outboundHandler, - channel, - action, - requestId, - version, - header.getFeatures(), - header.isCompressed(), - header.isHandshake(), - message.takeBreakerReleaseControl() - ); - try { + Span span = tracer.startSpan(SpanBuilder.from(action, channel)); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + if (header.isHandshake()) { messageListener.onRequestReceived(requestId, action); - if (message.isShortCircuit()) { - sendErrorResponse(action, transportChannel, message.getException()); - } else { - final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); - assertRemoteVersion(stream, header.getVersion()); - final RequestHandlerRegistry reg = requestHandlers.getHandler(action); - assert reg != null; - - final T request = newRequest(requestId, action, stream, reg); - request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); - checkStreamIsFullyConsumed(requestId, action, stream); - - final String executor = reg.getExecutor(); - if (ThreadPool.Names.SAME.equals(executor)) { - try { - reg.processMessageReceived(request, transportChannel); - } catch (Exception e) { - sendErrorResponse(reg.getAction(), transportChannel, e); - } + // Cannot short circuit handshakes + assert message.isShortCircuit() == false; + final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(stream, header.getVersion()); + final TcpTransportChannel transportChannel = new TcpTransportChannel( + outboundHandler, + channel, + action, + requestId, + version, + header.getFeatures(), + header.isCompressed(), + header.isHandshake(), + message.takeBreakerReleaseControl() + ); + TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); + try { + handshaker.handleHandshake(traceableTransportChannel, requestId, stream); + } catch (Exception e) { + if (Version.CURRENT.isCompatible(header.getVersion())) { + sendErrorResponse(action, traceableTransportChannel, e); } else { - threadPool.executor(executor).execute(new RequestHandler<>(reg, request, transportChannel)); + logger.warn( + new ParameterizedMessage( + "could not send error response to handshake received on [{}] using wire format version [{}], closing channel", + channel, + header.getVersion() + ), + e + ); + channel.close(); } } - } catch (Exception e) { - sendErrorResponse(action, transportChannel, e); + } else { + final TcpTransportChannel transportChannel = new TcpTransportChannel( + outboundHandler, + channel, + action, + requestId, + version, + header.getFeatures(), + header.isCompressed(), + header.isHandshake(), + message.takeBreakerReleaseControl() + ); + TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); + try { + messageListener.onRequestReceived(requestId, action); + if (message.isShortCircuit()) { + 
sendErrorResponse(action, traceableTransportChannel, message.getException()); + } else { + final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(stream, header.getVersion()); + final RequestHandlerRegistry reg = requestHandlers.getHandler(action); + assert reg != null; + + final T request = newRequest(requestId, action, stream, reg); + request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); + checkStreamIsFullyConsumed(requestId, action, stream); + + final String executor = reg.getExecutor(); + if (ThreadPool.Names.SAME.equals(executor)) { + try { + reg.processMessageReceived(request, traceableTransportChannel); + } catch (Exception e) { + sendErrorResponse(reg.getAction(), traceableTransportChannel, e); + } + } else { + threadPool.executor(executor).execute(new RequestHandler<>(reg, request, traceableTransportChannel)); + } + } + } catch (Exception e) { + sendErrorResponse(action, traceableTransportChannel, e); + } } + } catch (Exception e) { + span.setError(e); + span.endSpan(); + throw e; } } diff --git a/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java index 1599c4cb75517..8a5f6dfffb036 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/opensearch/transport/RemoteClusterConnection.java @@ -54,10 +54,10 @@ * current node is part of the cluster and it won't receive cluster state updates from the remote cluster. Remote clusters are also not * fully connected with the current node. From a connection perspective a local cluster forms a bi-directional star network while in the * remote case we only connect to a subset of the nodes in the cluster in an uni-directional fashion. - * + *
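The InboundHandler change above boils down to one pattern: start a server span from the action and channel, keep it in scope while dispatching, and hand a traceable channel down the stack so whichever component sends the response also ends the span. Reduced to a sketch (the dispatch callback is illustrative):

import org.opensearch.telemetry.tracing.Span;
import org.opensearch.telemetry.tracing.SpanBuilder;
import org.opensearch.telemetry.tracing.SpanScope;
import org.opensearch.telemetry.tracing.Tracer;
import org.opensearch.transport.TcpChannel;

final class InboundTracingSketch {
    static void handle(Tracer tracer, String action, TcpChannel channel, Runnable dispatch) {
        Span span = tracer.startSpan(SpanBuilder.from(action, channel));
        try (SpanScope scope = tracer.withSpanInScope(span)) {
            dispatch.run(); // on success the traceable channel ends the span with the response
        } catch (RuntimeException e) {
            span.setError(e);
            span.endSpan();
            throw e;
        }
    }
}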

* This class also handles the discovery of nodes from the remote cluster. The initial list of seed nodes is only used to discover all nodes * in the remote cluster and to connect to all eligible nodes; for details, see {@link RemoteClusterService#REMOTE_NODE_ATTRIBUTE}. - * + *

            * In the case of a disconnection, this class will issue a re-connect task to establish at most * {@link SniffConnectionStrategy#REMOTE_CONNECTIONS_PER_CLUSTER} until either all eligible nodes are exhausted or the maximum number of * connections per cluster has been reached. @@ -123,7 +123,7 @@ void ensureConnected(ActionListener listener) { /** * Collects all nodes on the connected cluster and returns / passes a nodeID to {@link DiscoveryNode} lookup function * that returns null if the node ID is not found. - * + *

* The requests to get cluster state on the connected cluster are made in the system context because logically * they are equivalent to checking a single detail in the local cluster state and should not require that the * user who made the request that is using this method in its implementation is authorized to view the entire diff --git a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java index 464282730d2b2..98c182c562928 100644 --- a/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/opensearch/transport/RequestHandlerRegistry.java @@ -91,14 +91,14 @@ public void processMessageReceived(Request request, TransportChannel channel) th Releasable unregisterTask = () -> taskManager.unregister(task); try { - if (channel instanceof TcpTransportChannel && task instanceof CancellableTask) { + if (channel instanceof BaseTcpTransportChannel && task instanceof CancellableTask) { if (request instanceof ShardSearchRequest) { // on receiving request, update the inbound network time to reflect time spent in transit over the network ((ShardSearchRequest) request).setInboundNetworkTime( Math.max(0, System.currentTimeMillis() - ((ShardSearchRequest) request).getInboundNetworkTime()) ); } - final TcpChannel tcpChannel = ((TcpTransportChannel) channel).getChannel(); + final TcpChannel tcpChannel = ((BaseTcpTransportChannel) channel).getChannel(); final Releasable stopTracking = taskManager.startTrackingCancellableChannelTask(tcpChannel, (CancellableTask) task); unregisterTask = Releasables.wrap(unregisterTask, stopTracking); } diff --git a/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java b/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java index 052611317f174..4dab0039ec878 100644 --- a/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java @@ -37,6 +37,7 @@ import org.opensearch.core.transport.TransportResponse; import java.io.IOException; +import java.util.Optional; /** * Transport channel for tasks @@ -89,4 +90,9 @@ public Version getVersion() { public TransportChannel getChannel() { return channel; } + + @Override + public <T> Optional<T> get(String name, Class<T> clazz) { + return getChannel().get(name, clazz); + } } diff --git a/server/src/main/java/org/opensearch/transport/TcpChannel.java b/server/src/main/java/org/opensearch/transport/TcpChannel.java index eac137ec30f1a..f98b65d0a4df1 100644 --- a/server/src/main/java/org/opensearch/transport/TcpChannel.java +++ b/server/src/main/java/org/opensearch/transport/TcpChannel.java @@ -38,6 +38,7 @@ import org.opensearch.core.common.bytes.BytesReference; import java.net.InetSocketAddress; +import java.util.Optional; /** * This is a tcp channel representing a single channel connection to another node. It is the base channel @@ -96,6 +97,20 @@ public interface TcpChannel extends CloseableChannel { */ ChannelStats getChannelStats(); + /** + * Returns the contextual property associated with this specific TCP channel (the + * implementation of how such properties are managed depends on the particular + * transport engine).
+ * + * @param name the name of the property + * @param clazz the expected type of the property + * + * @return the value of the property + */ + default Optional get(String name, Class clazz) { + return Optional.empty(); + } + /** * Channel statistics * diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index 7da7dcad13120..d0e6516973382 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -68,6 +68,7 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.node.Node; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -159,7 +160,8 @@ public TcpTransport( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { this.settings = settings; this.profileSettings = getProfileSettings(settings); @@ -208,7 +210,8 @@ public TcpTransport( handshaker, keepAlive, requestHandlers, - responseHandlers + responseHandlers, + tracer ); } diff --git a/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java b/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java index 00702d08902a9..81de0af07ea7c 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java @@ -38,6 +38,7 @@ import org.opensearch.search.query.QuerySearchResult; import java.io.IOException; +import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -46,11 +47,10 @@ * * @opensearch.internal */ -public final class TcpTransportChannel implements TransportChannel { +public final class TcpTransportChannel extends BaseTcpTransportChannel { private final AtomicBoolean released = new AtomicBoolean(); private final OutboundHandler outboundHandler; - private final TcpChannel channel; private final String action; private final long requestId; private final Version version; @@ -70,9 +70,9 @@ public final class TcpTransportChannel implements TransportChannel { boolean isHandshake, Releasable breakerRelease ) { + super(channel); this.version = version; this.features = features; - this.channel = channel; this.outboundHandler = outboundHandler; this.action = action; this.requestId = requestId; @@ -83,7 +83,7 @@ public final class TcpTransportChannel implements TransportChannel { @Override public String getProfileName() { - return channel.getProfile(); + return getChannel().getProfile(); } @Override @@ -93,7 +93,7 @@ public void sendResponse(TransportResponse response) throws IOException { // update outbound network time with current time before sending response over network ((QuerySearchResult) response).getShardSearchRequest().setOutboundNetworkTime(System.currentTimeMillis()); } - outboundHandler.sendResponse(version, features, channel, requestId, action, response, compressResponse, isHandshake); + outboundHandler.sendResponse(version, features, getChannel(), requestId, action, response, compressResponse, isHandshake); } finally { release(false); } @@ -102,7 +102,7 @@ public void sendResponse(TransportResponse response) throws IOException { @Override public void sendResponse(Exception exception) throws IOException { try 
+ /** * Channel statistics *
diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index 7da7dcad13120..d0e6516973382 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -68,6 +68,7 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.monitor.jvm.JvmInfo; import org.opensearch.node.Node; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -159,7 +160,8 @@ public TcpTransport( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { this.settings = settings; this.profileSettings = getProfileSettings(settings); @@ -208,7 +210,8 @@ public TcpTransport( handshaker, keepAlive, requestHandlers, - responseHandlers + responseHandlers, + tracer ); }
diff --git a/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java b/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java index 00702d08902a9..81de0af07ea7c 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java @@ -38,6 +38,7 @@ import org.opensearch.search.query.QuerySearchResult; import java.io.IOException; +import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -46,11 +47,10 @@ * * @opensearch.internal */ -public final class TcpTransportChannel implements TransportChannel { +public final class TcpTransportChannel extends BaseTcpTransportChannel { private final AtomicBoolean released = new AtomicBoolean(); private final OutboundHandler outboundHandler; - private final TcpChannel channel; private final String action; private final long requestId; private final Version version; @@ -70,9 +70,9 @@ public final class TcpTransportChannel implements TransportChannel { boolean isHandshake, Releasable breakerRelease ) { + super(channel); this.version = version; this.features = features; - this.channel = channel; this.outboundHandler = outboundHandler; this.action = action; this.requestId = requestId; @@ -83,7 +83,7 @@ public final class TcpTransportChannel implements TransportChannel { @Override public String getProfileName() { - return channel.getProfile(); + return getChannel().getProfile(); } @Override @@ -93,7 +93,7 @@ public void sendResponse(TransportResponse response) throws IOException { // update outbound network time with current time before sending response over network ((QuerySearchResult) response).getShardSearchRequest().setOutboundNetworkTime(System.currentTimeMillis()); } - outboundHandler.sendResponse(version, features, channel, requestId, action, response, compressResponse, isHandshake); + outboundHandler.sendResponse(version, features, getChannel(), requestId, action, response, compressResponse, isHandshake); } finally { release(false); } @@ -102,7 +102,7 @@ public void sendResponse(Exception exception) throws IOException { try { - outboundHandler.sendErrorResponse(version, features, channel, requestId, action, exception); + outboundHandler.sendErrorResponse(version, features, getChannel(), requestId, action, exception); } finally { release(true); } @@ -131,7 +131,8 @@ public Version getVersion() { return version; } - public TcpChannel getChannel() { - return channel; + @Override + public <T> Optional<T> get(String name, Class<T> clazz) { + return getChannel().get(name, clazz); + } }
diff --git a/server/src/main/java/org/opensearch/transport/TransportChannel.java b/server/src/main/java/org/opensearch/transport/TransportChannel.java index 3c582127f28e8..7423d59103302 100644 --- a/server/src/main/java/org/opensearch/transport/TransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TransportChannel.java @@ -39,6 +39,7 @@ import org.opensearch.core.transport.TransportResponse; import java.io.IOException; +import java.util.Optional; /** * A transport channel allows to send a response to a request on the channel. @@ -78,4 +79,18 @@ static void sendErrorResponse(TransportChannel channel, String actionName, Trans ); } } + + /** + * Returns the contextual property associated with this specific transport channel (the + * implementation of how such properties are managed depends on the particular + * transport engine). + * + * @param name the name of the property + * @param clazz the expected type of the property + * + * @return the value of the property. + */ + default <T> Optional<T> get(String name, Class<T> clazz) { + return Optional.empty(); + } }
diff --git a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java index 0b39983cc3bee..8992af18edb48 100644 --- a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java +++ b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java @@ -52,6 +52,13 @@ public interface TransportResponseHandler<T extends TransportResponse> extends W String executor(); + /** + * This method should handle the rejection/failure scenarios where the connection to the node is rejected or fails. + * It should be used to clear up the resources held by the {@link TransportResponseHandler}. + * @param exp the exception that caused the rejection or failure + */ + default void handleRejection(Exception exp) {}
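// [Editor's note] A handler that owns resources can override the new handleRejection hook so the
// resources are released even when the callback never runs because the executor rejected it or
// failed. A minimal sketch, assuming a hypothetical acquireResources() helper that returns an
// OpenSearch Releasable:
//
//     TransportResponseHandler<ClusterStateResponse> handler = new TransportResponseHandler<ClusterStateResponse>() {
//         private final Releasable resources = acquireResources(); // hypothetical helper
//
//         @Override
//         public ClusterStateResponse read(StreamInput in) throws IOException {
//             return new ClusterStateResponse(in);
//         }
//
//         @Override
//         public void handleResponse(ClusterStateResponse response) {
//             resources.close(); // normal completion
//         }
//
//         @Override
//         public void handleException(TransportException exp) {
//             resources.close(); // transport-level failure
//         }
//
//         @Override
//         public void handleRejection(Exception exp) {
//             resources.close(); // new hook: the notification itself was rejected or failed
//         }
//
//         @Override
//         public String executor() {
//             return ThreadPool.Names.SAME;
//         }
//     };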
+ default <Q extends TransportResponse> TransportResponseHandler<Q> wrap(Function<Q, T> converter, Writeable.Reader<Q> reader) { final TransportResponseHandler<T> self = this; return new TransportResponseHandler<Q>() {
diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index ec2369bb87cb3..93c89855f6107 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -68,6 +68,11 @@ import org.opensearch.node.NodeClosedException; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.handler.TraceableTransportResponseHandler; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; @@ -135,11 +140,11 @@ protected boolean removeEldestEntry(Map.Entry<Long, TimeoutInfoHolder> eldest) { // tracer log private final Logger tracerLog; - volatile String[] tracerLogInclude; volatile String[] tracerLogExclude; private final RemoteClusterService remoteClusterService; + private final Tracer tracer; /** if set will call requests sent to this id to shortcut and executed locally */ volatile DiscoveryNode localNode = null; @@ -168,12 +173,12 @@ public void close() {} }; static { - /** - * Registers server specific types as a streamables for serialization - * over the {@link StreamOutput} and {@link StreamInput} wire + /* + Registers server-specific types as streamables for serialization + over the {@link StreamOutput} and {@link StreamInput} wire */ Streamables.registerStreamables(); - /** Registers OpenSearch server specific exceptions (exceptions outside of core library) */ + /* Registers OpenSearch server-specific exceptions (exceptions outside of the core library) */ OpenSearchServerException.registerExceptions(); } @@ -193,7 +198,8 @@ public TransportService( TransportInterceptor transportInterceptor, Function<BoundTransportAddress, DiscoveryNode> localNodeFactory, @Nullable ClusterSettings clusterSettings, - Set<String> taskHeaders + Set<String> taskHeaders, + Tracer tracer ) { this( settings, @@ -203,7 +209,8 @@ public TransportService( localNodeFactory, clusterSettings, taskHeaders, - new ClusterConnectionManager(settings, transport) + new ClusterConnectionManager(settings, transport), + tracer ); } @@ -215,7 +222,8 @@ public TransportService( Function<BoundTransportAddress, DiscoveryNode> localNodeFactory, @Nullable ClusterSettings clusterSettings, Set<String> taskHeaders, - ConnectionManager connectionManager + ConnectionManager connectionManager, + Tracer tracer ) { this.transport = transport; transport.setSlowLogThreshold(TransportSettings.SLOW_OPERATION_THRESHOLD_SETTING.get(settings)); @@ -230,6 +238,7 @@ public TransportService( this.interceptor = transportInterceptor; this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); this.remoteClusterClient = DiscoveryNode.isRemoteClusterClient(settings); + this.tracer = tracer; remoteClusterService = new RemoteClusterService(settings, this); responseHandlers = transport.getResponseHandlers(); if (clusterSettings != null) { @@ -331,6 +340,7 @@ protected void doStop() { getExecutorService().execute(new AbstractRunnable() { @Override public void onRejection(Exception e) { + holderToNotify.handler().handleRejection(e); // if we get rejected during
node shutdown we don't wanna bubble it up logger.debug( () -> new ParameterizedMessage( @@ -343,6 +353,7 @@ public void onRejection(Exception e) { @Override public void onFailure(Exception e) { + holderToNotify.handler().handleRejection(e); logger.warn( () -> new ParameterizedMessage( "failed to notify response handler on exception, action: {}", @@ -867,53 +878,60 @@ public final void sendRequest( final TransportRequestOptions options, final TransportResponseHandler handler ) { - try { - logger.debug("Action: " + action); - final TransportResponseHandler delegate; - if (request.getParentTask().isSet()) { - // TODO: capture the connection instead so that we can cancel child tasks on the remote connections. - final Releasable unregisterChildNode = taskManager.registerChildNode(request.getParentTask().getId(), connection.getNode()); - delegate = new TransportResponseHandler() { - @Override - public void handleResponse(T response) { - unregisterChildNode.close(); - handler.handleResponse(response); - } + final Span span = tracer.startSpan(SpanBuilder.from(action, connection)); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + TransportResponseHandler traceableTransportResponseHandler = TraceableTransportResponseHandler.create(handler, span, tracer); + try { + logger.debug("Action: " + action); + final TransportResponseHandler delegate; + if (request.getParentTask().isSet()) { + // TODO: capture the connection instead so that we can cancel child tasks on the remote connections. + final Releasable unregisterChildNode = taskManager.registerChildNode( + request.getParentTask().getId(), + connection.getNode() + ); + delegate = new TransportResponseHandler() { + @Override + public void handleResponse(T response) { + unregisterChildNode.close(); + traceableTransportResponseHandler.handleResponse(response); + } - @Override - public void handleException(TransportException exp) { - unregisterChildNode.close(); - handler.handleException(exp); - } + @Override + public void handleException(TransportException exp) { + unregisterChildNode.close(); + traceableTransportResponseHandler.handleException(exp); + } - @Override - public String executor() { - return handler.executor(); - } + @Override + public String executor() { + return traceableTransportResponseHandler.executor(); + } - @Override - public T read(StreamInput in) throws IOException { - return handler.read(in); - } + @Override + public T read(StreamInput in) throws IOException { + return traceableTransportResponseHandler.read(in); + } - @Override - public String toString() { - return getClass().getName() + "/[" + action + "]:" + handler.toString(); - } - }; - } else { - delegate = handler; - } - asyncSender.sendRequest(connection, action, request, options, delegate); - } catch (final Exception ex) { - // the caller might not handle this so we invoke the handler - final TransportException te; - if (ex instanceof TransportException) { - te = (TransportException) ex; - } else { - te = new TransportException("failure to send", ex); + @Override + public String toString() { + return getClass().getName() + "/[" + action + "]:" + handler.toString(); + } + }; + } else { + delegate = traceableTransportResponseHandler; + } + asyncSender.sendRequest(connection, action, request, options, delegate); + } catch (final Exception ex) { + // the caller might not handle this so we invoke the handler + final TransportException te; + if (ex instanceof TransportException) { + te = (TransportException) ex; + } else { + te = new TransportException("failure to 
send", ex); + } + traceableTransportResponseHandler.handleException(te); } - handler.handleException(te); } } @@ -1035,6 +1053,7 @@ private void sendRequestInternal( threadPool.executor(executor).execute(new AbstractRunnable() { @Override public void onRejection(Exception e) { + contextToNotify.handler().handleRejection(e); // if we get rejected during node shutdown we don't wanna bubble it up logger.debug( () -> new ParameterizedMessage( @@ -1047,6 +1066,7 @@ public void onRejection(Exception e) { @Override public void onFailure(Exception e) { + contextToNotify.handler().handleRejection(e); logger.warn( () -> new ParameterizedMessage( "failed to notify response handler on exception, action: {}", diff --git a/server/src/main/java/org/opensearch/watcher/FileWatcher.java b/server/src/main/java/org/opensearch/watcher/FileWatcher.java index 82f95d6a1622c..d773e3b5d7c9e 100644 --- a/server/src/main/java/org/opensearch/watcher/FileWatcher.java +++ b/server/src/main/java/org/opensearch/watcher/FileWatcher.java @@ -44,7 +44,7 @@ /** * File resources watcher - * + *
<p>
            * The file watcher checks directory and all its subdirectories for file changes and notifies its listeners accordingly * * @opensearch.internal diff --git a/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java b/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java index a7c7a248ce417..62002f9c6c323 100644 --- a/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java +++ b/server/src/main/java/org/opensearch/watcher/ResourceWatcherService.java @@ -33,6 +33,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; @@ -48,14 +49,15 @@ /** * Generic resource watcher service - * + *
<p>
* Other opensearch services can register their resource watchers with this service using {@link #add(ResourceWatcher)} * method. This service will call {@link org.opensearch.watcher.ResourceWatcher#checkAndNotify()} method of all * registered watchers periodically. The frequency of checks can be specified using {@code resource.reload.interval} setting, which * defaults to {@code 60s}. The service can be disabled by setting {@code resource.reload.enabled} setting to {@code false}. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "1.0.0") public class ResourceWatcherService implements Closeable { private static final Logger logger = LogManager.getLogger(ResourceWatcherService.class);
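// [Editor's note] With ResourceWatcherService promoted to @PublicApi, a plugin can pair it with
// the FileWatcher documented earlier in this section. A minimal sketch; the config path, the
// reloadPluginConfig() helper, and obtaining the service (typically via createComponents()) are
// illustrative assumptions, and note that add(...) declares IOException:
//
//     FileWatcher watcher = new FileWatcher(pluginConfigPath); // a java.nio.file.Path
//     watcher.addListener(new FileChangesListener() {
//         @Override
//         public void onFileChanged(Path file) {
//             reloadPluginConfig(file); // hypothetical helper
//         }
//     });
//     // HIGH frequency re-checks roughly every 5 seconds via ResourceWatcher#checkAndNotify()
//     resourceWatcherService.add(watcher, ResourceWatcherService.Frequency.HIGH);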
diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec deleted file mode 100644 index 8b37d91cd8bc4..0000000000000 --- a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec +++ /dev/null @@ -1,2 +0,0 @@ -org.opensearch.index.codec.customcodecs.ZstdCodec -org.opensearch.index.codec.customcodecs.ZstdNoDictCodec
diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index ed6acd0e3a903..9ec2bbc4d027f 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -84,6 +84,7 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentLocation; +import org.opensearch.crypto.CryptoRegistryException; import org.opensearch.discovery.MasterNotDiscoveredException; import org.opensearch.env.ShardLockObtainFailedException; import org.opensearch.index.engine.RecoveryEngineException; @@ -894,6 +895,7 @@ public void testIds() { ids.put(168, PreferenceBasedSearchNotAllowedException.class); ids.put(169, NodeWeighedAwayException.class); ids.put(170, SearchPipelineProcessingException.class); + ids.put(171, CryptoRegistryException.class); ids.put(10001, IndexCreateBlockException.class); Map<Class<? extends OpenSearchException>, Integer> reverse = new HashMap<>();
diff --git a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java index 592217d9c65d0..ba6cd70d7992a 100644 --- a/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java +++ b/server/src/test/java/org/opensearch/OpenSearchExceptionTests.java @@ -972,7 +972,7 @@ public void testFailureToAndFromXContentWithDetails() throws IOException { /** * Builds a {@link ToXContent} using a JSON XContentBuilder and compares the result to the given json in string format. - * + * <p>
            * By default, the stack trace of the exception is not rendered. The parameter `errorTrace` forces the stack trace to * be rendered like the REST API does when the "error_trace" parameter is set to true. */ diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java index 26b0c5ef05cdc..a015e671f4872 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java @@ -55,6 +55,7 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.TestThreadPool; @@ -139,7 +140,8 @@ public void setupForTest() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); final Settings.Builder nodeSettingsBuilder = Settings.builder(); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java index 535080b196f29..10e4ab6388be4 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsActionTests.java @@ -49,6 +49,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.TestThreadPool; @@ -111,7 +112,8 @@ public void setupForTest() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); new TransportClearVotingConfigExclusionsAction( diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index e6460e429bd42..253639c09b606 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -34,19 +34,28 @@ import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.cluster.coordination.PendingClusterStateStats; +import org.opensearch.cluster.coordination.PersistedStateStats; import org.opensearch.cluster.coordination.PublishClusterStateStats; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.WeightedRoutingStats; import 
org.opensearch.cluster.service.ClusterManagerThrottlingStats; +import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.metrics.OperationStats; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.indices.breaker.AllCircuitBreakerStats; import org.opensearch.core.indices.breaker.CircuitBreakerStats; import org.opensearch.discovery.DiscoveryStats; +import org.opensearch.gateway.remote.RemotePersistenceStats; import org.opensearch.http.HttpStats; +import org.opensearch.index.ReplicationStats; +import org.opensearch.index.SegmentReplicationRejectionStats; import org.opensearch.index.remote.RemoteSegmentStats; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.index.translog.RemoteTranslogStats; import org.opensearch.indices.NodeIndicesStats; import org.opensearch.ingest.IngestStats; import org.opensearch.monitor.fs.FsInfo; @@ -54,6 +63,8 @@ import org.opensearch.monitor.os.OsStats; import org.opensearch.monitor.process.ProcessStats; import org.opensearch.node.AdaptiveSelectionStats; +import org.opensearch.node.NodeResourceUsageStats; +import org.opensearch.node.NodesResourceUsageStats; import org.opensearch.node.ResponseCollectorService; import org.opensearch.script.ScriptCacheStats; import org.opensearch.script.ScriptStats; @@ -64,6 +75,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; @@ -277,6 +289,10 @@ public void testSerialization() throws IOException { assertEquals(ioStats.getTotalReadOperations(), deserializedIoStats.getTotalReadOperations()); assertEquals(ioStats.getTotalWriteKilobytes(), deserializedIoStats.getTotalWriteKilobytes()); assertEquals(ioStats.getTotalWriteOperations(), deserializedIoStats.getTotalWriteOperations()); + assertEquals(ioStats.getTotalReadTime(), deserializedIoStats.getTotalReadTime()); + assertEquals(ioStats.getTotalWriteTime(), deserializedIoStats.getTotalWriteTime()); + assertEquals(ioStats.getTotalQueueSize(), deserializedIoStats.getTotalQueueSize()); + assertEquals(ioStats.getTotalIOTimeMillis(), deserializedIoStats.getTotalIOTimeMillis()); assertEquals(ioStats.getDevicesStats().length, deserializedIoStats.getDevicesStats().length); for (int i = 0; i < ioStats.getDevicesStats().length; i++) { FsInfo.DeviceStats deviceStats = ioStats.getDevicesStats()[i]; @@ -337,6 +353,26 @@ public void testSerialization() throws IOException { assertEquals(queueStats.getTotal(), deserializedDiscoveryStats.getQueueStats().getTotal()); assertEquals(queueStats.getPending(), deserializedDiscoveryStats.getQueueStats().getPending()); } + ClusterStateStats stateStats = discoveryStats.getClusterStateStats(); + if (stateStats == null) { + assertNull(deserializedDiscoveryStats.getClusterStateStats()); + } else { + assertEquals(stateStats.getUpdateFailed(), deserializedDiscoveryStats.getClusterStateStats().getUpdateFailed()); + assertEquals(stateStats.getUpdateSuccess(), deserializedDiscoveryStats.getClusterStateStats().getUpdateSuccess()); + assertEquals( + stateStats.getUpdateTotalTimeInMillis(), + deserializedDiscoveryStats.getClusterStateStats().getUpdateTotalTimeInMillis() + ); + assertEquals(1, deserializedDiscoveryStats.getClusterStateStats().getPersistenceStats().size()); + PersistedStateStats deserializedRemoteStateStats = 
deserializedDiscoveryStats.getClusterStateStats() + .getPersistenceStats() + .get(0); + PersistedStateStats remoteStateStats = stateStats.getPersistenceStats().get(0); + assertEquals(remoteStateStats.getStatsName(), deserializedRemoteStateStats.getStatsName()); + assertEquals(remoteStateStats.getFailedCount(), deserializedRemoteStateStats.getFailedCount()); + assertEquals(remoteStateStats.getSuccessCount(), deserializedRemoteStateStats.getSuccessCount()); + assertEquals(remoteStateStats.getTotalTimeInMillis(), deserializedRemoteStateStats.getTotalTimeInMillis()); + } } IngestStats ingestStats = nodeStats.getIngestStats(); IngestStats deserializedIngestStats = deserializedNodeStats.getIngestStats(); @@ -388,6 +424,35 @@ public void testSerialization() throws IOException { assertEquals(aStats.responseTime, bStats.responseTime, 0.01); }); } + NodesResourceUsageStats resourceUsageStats = nodeStats.getResourceUsageStats(); + NodesResourceUsageStats deserializedResourceUsageStats = deserializedNodeStats.getResourceUsageStats(); + if (resourceUsageStats == null) { + assertNull(deserializedResourceUsageStats); + } else { + resourceUsageStats.getNodeIdToResourceUsageStatsMap().forEach((k, v) -> { + NodeResourceUsageStats aResourceUsageStats = resourceUsageStats.getNodeIdToResourceUsageStatsMap().get(k); + NodeResourceUsageStats bResourceUsageStats = deserializedResourceUsageStats.getNodeIdToResourceUsageStatsMap() + .get(k); + assertEquals( + aResourceUsageStats.getMemoryUtilizationPercent(), + bResourceUsageStats.getMemoryUtilizationPercent(), + 0.0 + ); + assertEquals(aResourceUsageStats.getCpuUtilizationPercent(), bResourceUsageStats.getCpuUtilizationPercent(), 0.0); + assertEquals(aResourceUsageStats.getTimestamp(), bResourceUsageStats.getTimestamp()); + }); + } + SegmentReplicationRejectionStats segmentReplicationRejectionStats = nodeStats.getSegmentReplicationRejectionStats(); + SegmentReplicationRejectionStats deserializedSegmentReplicationRejectionStats = deserializedNodeStats + .getSegmentReplicationRejectionStats(); + if (segmentReplicationRejectionStats == null) { + assertNull(deserializedSegmentReplicationRejectionStats); + } else { + assertEquals( + segmentReplicationRejectionStats.getTotalRejectionCount(), + deserializedSegmentReplicationRejectionStats.getTotalRejectionCount() + ); + } ScriptCacheStats scriptCacheStats = nodeStats.getScriptCacheStats(); ScriptCacheStats deserializedScriptCacheStats = deserializedNodeStats.getScriptCacheStats(); if (scriptCacheStats == null) { @@ -459,6 +524,21 @@ public void testSerialization() throws IOException { assertEquals(remoteSegmentStats.getUploadBytesFailed(), deserializedRemoteSegmentStats.getUploadBytesFailed()); assertEquals(remoteSegmentStats.getMaxRefreshTimeLag(), deserializedRemoteSegmentStats.getMaxRefreshTimeLag()); assertEquals(remoteSegmentStats.getMaxRefreshBytesLag(), deserializedRemoteSegmentStats.getMaxRefreshBytesLag()); + assertEquals(remoteSegmentStats.getTotalRefreshBytesLag(), deserializedRemoteSegmentStats.getTotalRefreshBytesLag()); + assertEquals(remoteSegmentStats.getTotalUploadTime(), deserializedRemoteSegmentStats.getTotalUploadTime()); + assertEquals(remoteSegmentStats.getTotalDownloadTime(), deserializedRemoteSegmentStats.getTotalDownloadTime()); + + RemoteTranslogStats remoteTranslogStats = nodeIndicesStats.getTranslog().getRemoteTranslogStats(); + RemoteTranslogStats deserializedRemoteTranslogStats = deserializedNodeIndicesStats.getTranslog() + .getRemoteTranslogStats(); + assertEquals(remoteTranslogStats, 
deserializedRemoteTranslogStats); + + ReplicationStats replicationStats = nodeIndicesStats.getSegments().getReplicationStats(); + + ReplicationStats deserializedReplicationStats = deserializedNodeIndicesStats.getSegments().getReplicationStats(); + assertEquals(replicationStats.getMaxBytesBehind(), deserializedReplicationStats.getMaxBytesBehind()); + assertEquals(replicationStats.getTotalBytesBehind(), deserializedReplicationStats.getTotalBytesBehind()); + assertEquals(replicationStats.getMaxReplicationLag(), deserializedReplicationStats.getMaxReplicationLag()); } } } @@ -583,7 +663,8 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { randomIntBetween(1, 1000), randomNonNegativeLong(), randomIntBetween(1, 1000), - randomIntBetween(1, 1000) + randomIntBetween(1, 1000), + randomIntBetween(-1, 10) ) ); } @@ -604,6 +685,10 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), null ); deviceStatsArray[i] = new FsInfo.DeviceStats( @@ -614,6 +699,10 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), previousDeviceStats ); } @@ -660,12 +749,16 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { ScriptStats scriptStats = frequently() ? new ScriptStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()) : null; + ClusterStateStats stateStats = new ClusterStateStats(); + RemotePersistenceStats remoteStateStats = new RemotePersistenceStats(); + stateStats.setPersistenceStats(Arrays.asList(remoteStateStats)); DiscoveryStats discoveryStats = frequently() ? new DiscoveryStats( randomBoolean() ? new PendingClusterStateStats(randomInt(), randomInt(), randomInt()) : null, randomBoolean() ? new PublishClusterStateStats(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()) - : null + : null, + randomBoolean() ? 
stateStats : null ) : null; IngestStats ingestStats = null; @@ -735,6 +828,34 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { } adaptiveSelectionStats = new AdaptiveSelectionStats(nodeConnections, nodeStats); } + NodesResourceUsageStats nodesResourceUsageStats = null; + if (frequently()) { + int numNodes = randomIntBetween(0, 10); + Map nodeConnections = new HashMap<>(); + Map resourceUsageStatsMap = new HashMap<>(); + for (int i = 0; i < numNodes; i++) { + String nodeId = randomAlphaOfLengthBetween(3, 10); + // add outgoing connection info + if (frequently()) { + nodeConnections.put(nodeId, randomLongBetween(0, 100)); + } + // add node calculations + if (frequently()) { + NodeResourceUsageStats stats = new NodeResourceUsageStats( + nodeId, + System.currentTimeMillis(), + randomDoubleBetween(1.0, 100.0, true), + randomDoubleBetween(1.0, 100.0, true) + ); + resourceUsageStatsMap.put(nodeId, stats); + } + } + nodesResourceUsageStats = new NodesResourceUsageStats(resourceUsageStatsMap); + } + SegmentReplicationRejectionStats segmentReplicationRejectionStats = null; + if (frequently()) { + segmentReplicationRejectionStats = new SegmentReplicationRejectionStats(randomNonNegativeLong()); + } ClusterManagerThrottlingStats clusterManagerThrottlingStats = null; if (frequently()) { clusterManagerThrottlingStats = new ClusterManagerThrottlingStats(); @@ -766,6 +887,7 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { discoveryStats, ingestStats, adaptiveSelectionStats, + nodesResourceUsageStats, scriptCacheStats, null, null, @@ -774,6 +896,8 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { weightedRoutingStats, null, null, + null, + segmentReplicationRejectionStats, null ); } @@ -781,7 +905,7 @@ public static NodeStats createNodeStats(boolean remoteStoreStats) { private static NodeIndicesStats getNodeIndicesStats(boolean remoteStoreStats) { NodeIndicesStats indicesStats = null; if (remoteStoreStats) { - indicesStats = new NodeIndicesStats(new CommonStats(CommonStatsFlags.ALL), new HashMap<>()); + indicesStats = new NodeIndicesStats(new CommonStats(CommonStatsFlags.ALL), new HashMap<>(), new SearchRequestStats()); RemoteSegmentStats remoteSegmentStats = indicesStats.getSegments().getRemoteSegmentStats(); remoteSegmentStats.addUploadBytesStarted(10L); remoteSegmentStats.addUploadBytesSucceeded(10L); @@ -789,12 +913,44 @@ private static NodeIndicesStats getNodeIndicesStats(boolean remoteStoreStats) { remoteSegmentStats.addDownloadBytesStarted(10L); remoteSegmentStats.addDownloadBytesSucceeded(10L); remoteSegmentStats.addDownloadBytesFailed(1L); - remoteSegmentStats.setMaxRefreshBytesLag(5L); + remoteSegmentStats.addTotalRefreshBytesLag(5L); + remoteSegmentStats.addMaxRefreshBytesLag(2L); remoteSegmentStats.setMaxRefreshTimeLag(2L); + remoteSegmentStats.addTotalUploadTime(20L); + remoteSegmentStats.addTotalDownloadTime(20L); + remoteSegmentStats.addTotalRejections(5L); + + RemoteTranslogStats remoteTranslogStats = indicesStats.getTranslog().getRemoteTranslogStats(); + RemoteTranslogStats otherRemoteTranslogStats = new RemoteTranslogStats(getRandomRemoteTranslogTransferTrackerStats()); + remoteTranslogStats.add(otherRemoteTranslogStats); } return indicesStats; } + private static RemoteTranslogTransferTracker.Stats getRandomRemoteTranslogTransferTrackerStats() { + return new RemoteTranslogTransferTracker.Stats( + new ShardId("test-idx", "test-idx", randomIntBetween(1, 10)), + 0L, + randomLongBetween(100, 500), + randomLongBetween(50, 100), + 
randomLongBetween(100, 200), + randomLongBetween(10000, 50000), + randomLongBetween(5000, 10000), + randomLongBetween(10000, 20000), + 0L, + 0D, + 0D, + 0D, + 0L, + 0L, + 0L, + 0L, + 0D, + 0D, + 0D + ); + } + private OperationStats getPipelineStats(List pipelineStats, String id) { return pipelineStats.stream().filter(p1 -> p1.getPipelineId().equals(id)).findFirst().map(p2 -> p2.getStats()).orElse(null); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index df74eac3e5099..8ee41e43a5bed 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -61,6 +61,7 @@ import org.opensearch.tasks.TaskCancellationService; import org.opensearch.tasks.TaskManager; import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.tasks.MockTaskManager; import org.opensearch.threadpool.RunnableTaskExecutionListener; @@ -210,13 +211,15 @@ public TestNode(String name, ThreadPool threadPool, Settings settings) { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ), threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddressDiscoveryNodeFunction, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ) { @Override protected TaskManager createTaskManager( diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponseTests.java index a4787a9beda8b..8cc0982c86233 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponseTests.java @@ -16,6 +16,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -24,10 +25,12 @@ import java.util.Map; import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.compareStatsResponse; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createEmptyTranslogStats; import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createShardRouting; import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForNewPrimary; import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForNewReplica; import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForRemoteStoreRestoredPrimary; +import static 
org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createTranslogStats; import static org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS; public class RemoteStoreStatsResponseTests extends OpenSearchTestCase { @@ -48,9 +51,10 @@ public void tearDown() throws Exception { } public void testSerializationForPrimary() throws Exception { - RemoteSegmentTransferTracker.Stats mockPrimaryTrackerStats = createStatsForNewPrimary(shardId); + RemoteSegmentTransferTracker.Stats mockSegmentTrackerStats = createStatsForNewPrimary(shardId); + RemoteTranslogTransferTracker.Stats mockTranslogTrackerStats = createTranslogStats(shardId); ShardRouting primaryShardRouting = createShardRouting(shardId, true); - RemoteStoreStats primaryShardStats = new RemoteStoreStats(mockPrimaryTrackerStats, primaryShardRouting); + RemoteStoreStats primaryShardStats = new RemoteStoreStats(mockSegmentTrackerStats, mockTranslogTrackerStats, primaryShardRouting); RemoteStoreStatsResponse statsResponse = new RemoteStoreStatsResponse( new RemoteStoreStats[] { primaryShardStats }, 1, @@ -73,16 +77,26 @@ public void testSerializationForPrimary() throws Exception { ArrayList> perShardNumberObject = (ArrayList>) shardsObject.get("0"); assertEquals(perShardNumberObject.size(), 1); Map perShardCopyObject = perShardNumberObject.get(0); - compareStatsResponse(perShardCopyObject, mockPrimaryTrackerStats, primaryShardRouting); + compareStatsResponse(perShardCopyObject, mockSegmentTrackerStats, mockTranslogTrackerStats, primaryShardRouting); } public void testSerializationForBothPrimaryAndReplica() throws Exception { - RemoteSegmentTransferTracker.Stats mockPrimaryTrackerStats = createStatsForNewPrimary(shardId); - RemoteSegmentTransferTracker.Stats mockReplicaTrackerStats = createStatsForNewReplica(shardId); + RemoteSegmentTransferTracker.Stats mockPrimarySegmentTrackerStats = createStatsForNewPrimary(shardId); + RemoteSegmentTransferTracker.Stats mockReplicaSegmentTrackerStats = createStatsForNewReplica(shardId); + RemoteTranslogTransferTracker.Stats mockPrimaryTranslogTrackerStats = createTranslogStats(shardId); + RemoteTranslogTransferTracker.Stats mockReplicaTranslogTrackerStats = createEmptyTranslogStats(shardId); ShardRouting primaryShardRouting = createShardRouting(shardId, true); ShardRouting replicaShardRouting = createShardRouting(shardId, false); - RemoteStoreStats primaryShardStats = new RemoteStoreStats(mockPrimaryTrackerStats, primaryShardRouting); - RemoteStoreStats replicaShardStats = new RemoteStoreStats(mockReplicaTrackerStats, replicaShardRouting); + RemoteStoreStats primaryShardStats = new RemoteStoreStats( + mockPrimarySegmentTrackerStats, + mockPrimaryTranslogTrackerStats, + primaryShardRouting + ); + RemoteStoreStats replicaShardStats = new RemoteStoreStats( + mockReplicaSegmentTrackerStats, + mockReplicaTranslogTrackerStats, + replicaShardRouting + ); RemoteStoreStatsResponse statsResponse = new RemoteStoreStatsResponse( new RemoteStoreStats[] { primaryShardStats, replicaShardStats }, 2, @@ -109,20 +123,30 @@ public void testSerializationForBothPrimaryAndReplica() throws Exception { RemoteStoreStats.RoutingFields.PRIMARY ); if (isPrimary) { - compareStatsResponse(shardObject, mockPrimaryTrackerStats, primaryShardRouting); + compareStatsResponse(shardObject, mockPrimarySegmentTrackerStats, mockPrimaryTranslogTrackerStats, primaryShardRouting); } else { - compareStatsResponse(shardObject, mockReplicaTrackerStats, replicaShardRouting); + compareStatsResponse(shardObject, 
mockReplicaSegmentTrackerStats, mockReplicaTranslogTrackerStats, replicaShardRouting); } }); } public void testSerializationForBothRemoteStoreRestoredPrimaryAndReplica() throws Exception { - RemoteSegmentTransferTracker.Stats mockPrimaryTrackerStats = createStatsForRemoteStoreRestoredPrimary(shardId); - RemoteSegmentTransferTracker.Stats mockReplicaTrackerStats = createStatsForNewReplica(shardId); + RemoteSegmentTransferTracker.Stats mockPrimarySegmentTrackerStats = createStatsForRemoteStoreRestoredPrimary(shardId); + RemoteSegmentTransferTracker.Stats mockReplicaSegmentTrackerStats = createStatsForNewReplica(shardId); + RemoteTranslogTransferTracker.Stats mockPrimaryTranslogTrackerStats = createTranslogStats(shardId); + RemoteTranslogTransferTracker.Stats mockReplicaTranslogTrackerStats = createEmptyTranslogStats(shardId); ShardRouting primaryShardRouting = createShardRouting(shardId, true); ShardRouting replicaShardRouting = createShardRouting(shardId, false); - RemoteStoreStats primaryShardStats = new RemoteStoreStats(mockPrimaryTrackerStats, primaryShardRouting); - RemoteStoreStats replicaShardStats = new RemoteStoreStats(mockReplicaTrackerStats, replicaShardRouting); + RemoteStoreStats primaryShardStats = new RemoteStoreStats( + mockPrimarySegmentTrackerStats, + mockPrimaryTranslogTrackerStats, + primaryShardRouting + ); + RemoteStoreStats replicaShardStats = new RemoteStoreStats( + mockReplicaSegmentTrackerStats, + mockReplicaTranslogTrackerStats, + replicaShardRouting + ); RemoteStoreStatsResponse statsResponse = new RemoteStoreStatsResponse( new RemoteStoreStats[] { primaryShardStats, replicaShardStats }, 2, @@ -149,9 +173,9 @@ public void testSerializationForBothRemoteStoreRestoredPrimaryAndReplica() throw RemoteStoreStats.RoutingFields.PRIMARY ); if (isPrimary) { - compareStatsResponse(shardObject, mockPrimaryTrackerStats, primaryShardRouting); + compareStatsResponse(shardObject, mockPrimarySegmentTrackerStats, mockPrimaryTranslogTrackerStats, primaryShardRouting); } else { - compareStatsResponse(shardObject, mockReplicaTrackerStats, replicaShardRouting); + compareStatsResponse(shardObject, mockReplicaSegmentTrackerStats, mockReplicaTranslogTrackerStats, replicaShardRouting); } }); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTestHelper.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTestHelper.java index 7430ccaed725b..5886d47a7be24 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTestHelper.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTestHelper.java @@ -13,12 +13,14 @@ import org.opensearch.cluster.routing.TestShardRouting; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.store.DirectoryFileTransferTracker; import java.util.Map; import static org.opensearch.test.OpenSearchTestCase.assertEquals; import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLength; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; /** @@ -46,6 +48,7 @@ static RemoteSegmentTransferTracker.Stats createStatsForNewPrimary(ShardId shard 0, 0, 0, + 10, createZeroDirectoryFileTransferStats() ); } @@ -71,6 +74,7 @@ static RemoteSegmentTransferTracker.Stats createStatsForNewReplica(ShardId 
shard 0, 0, 0, + 0, createSampleDirectoryFileTransferStats() ); } @@ -96,27 +100,38 @@ static RemoteSegmentTransferTracker.Stats createStatsForRemoteStoreRestoredPrima 0, 0, 100, + 10, createSampleDirectoryFileTransferStats() ); } static DirectoryFileTransferTracker.Stats createSampleDirectoryFileTransferStats() { - return new DirectoryFileTransferTracker.Stats(10, 0, 10, 12345, 5, 5, 5); + return new DirectoryFileTransferTracker.Stats(10, 0, 10, 12345, 5, 5, 5, 10); } static DirectoryFileTransferTracker.Stats createZeroDirectoryFileTransferStats() { - return new DirectoryFileTransferTracker.Stats(0, 0, 0, 0, 0, 0, 0); + return new DirectoryFileTransferTracker.Stats(0, 0, 0, 0, 0, 0, 0, 0); } static ShardRouting createShardRouting(ShardId shardId, boolean isPrimary) { return TestShardRouting.newShardRouting(shardId, randomAlphaOfLength(4), isPrimary, ShardRoutingState.STARTED); } + static RemoteTranslogTransferTracker.Stats createTranslogStats(ShardId shardId) { + return new RemoteTranslogTransferTracker.Stats(shardId, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9D, 10D, 11D, 1L, 2L, 3L, 4L, 9D, 10D, 11D); + } + + static RemoteTranslogTransferTracker.Stats createEmptyTranslogStats(ShardId shardId) { + return new RemoteTranslogTransferTracker.Stats(shardId, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0D, 0D, 0D, 0L, 0L, 0L, 0L, 0D, 0D, 0D); + } + static void compareStatsResponse( Map statsObject, - RemoteSegmentTransferTracker.Stats segmentStatsTracker, + RemoteSegmentTransferTracker.Stats segmentTransferStats, + RemoteTranslogTransferTracker.Stats translogTransferStats, ShardRouting routing ) { + // Compare Remote Segment Store stats assertEquals( ((Map) statsObject.get(RemoteStoreStats.Fields.ROUTING)).get(RemoteStoreStats.RoutingFields.NODE_ID), routing.currentNodeId() @@ -134,138 +149,269 @@ static void compareStatsResponse( Map segmentDownloads = ((Map) segment.get(RemoteStoreStats.SubFields.DOWNLOAD)); Map segmentUploads = ((Map) segment.get(RemoteStoreStats.SubFields.UPLOAD)); - if (segmentStatsTracker.directoryFileTransferTrackerStats.transferredBytesStarted != 0) { + if (segmentTransferStats.directoryFileTransferTrackerStats.transferredBytesStarted != 0) { assertEquals( segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.LAST_SYNC_TIMESTAMP), - (int) segmentStatsTracker.directoryFileTransferTrackerStats.lastTransferTimestampMs + (int) segmentTransferStats.directoryFileTransferTrackerStats.lastTransferTimestampMs ); assertEquals( - ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOADS_IN_BYTES)).get( - RemoteStoreStats.SubFields.STARTED + ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOAD_SIZE)).get( + RemoteStoreStats.SubFields.STARTED_BYTES ), - (int) segmentStatsTracker.directoryFileTransferTrackerStats.transferredBytesStarted + (int) segmentTransferStats.directoryFileTransferTrackerStats.transferredBytesStarted ); assertEquals( - ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOADS_IN_BYTES)).get( - RemoteStoreStats.SubFields.SUCCEEDED + ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOAD_SIZE)).get( + RemoteStoreStats.SubFields.SUCCEEDED_BYTES ), - (int) segmentStatsTracker.directoryFileTransferTrackerStats.transferredBytesSucceeded + (int) segmentTransferStats.directoryFileTransferTrackerStats.transferredBytesSucceeded ); assertEquals( - ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOADS_IN_BYTES)).get( - RemoteStoreStats.SubFields.FAILED + ((Map) 
segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOAD_SIZE)).get( + RemoteStoreStats.SubFields.FAILED_BYTES ), - (int) segmentStatsTracker.directoryFileTransferTrackerStats.transferredBytesFailed + (int) segmentTransferStats.directoryFileTransferTrackerStats.transferredBytesFailed ); assertEquals( ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.DOWNLOAD_SIZE_IN_BYTES)).get( RemoteStoreStats.SubFields.LAST_SUCCESSFUL ), - (int) segmentStatsTracker.directoryFileTransferTrackerStats.lastSuccessfulTransferInBytes + (int) segmentTransferStats.directoryFileTransferTrackerStats.lastSuccessfulTransferInBytes ); assertEquals( ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.DOWNLOAD_SIZE_IN_BYTES)).get( RemoteStoreStats.SubFields.MOVING_AVG ), - segmentStatsTracker.directoryFileTransferTrackerStats.transferredBytesMovingAverage + segmentTransferStats.directoryFileTransferTrackerStats.transferredBytesMovingAverage ); assertEquals( ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.DOWNLOAD_SPEED_IN_BYTES_PER_SEC)).get( RemoteStoreStats.SubFields.MOVING_AVG ), - segmentStatsTracker.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage + segmentTransferStats.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage ); } else { assertTrue(segmentDownloads.isEmpty()); } - if (segmentStatsTracker.totalUploadsStarted != 0) { + if (segmentTransferStats.totalUploadsStarted != 0) { assertEquals( segmentUploads.get(RemoteStoreStats.UploadStatsFields.LOCAL_REFRESH_TIMESTAMP), - (int) segmentStatsTracker.localRefreshClockTimeMs + (int) segmentTransferStats.localRefreshClockTimeMs ); assertEquals( segmentUploads.get(RemoteStoreStats.UploadStatsFields.REMOTE_REFRESH_TIMESTAMP), - (int) segmentStatsTracker.remoteRefreshClockTimeMs + (int) segmentTransferStats.remoteRefreshClockTimeMs ); assertEquals( segmentUploads.get(RemoteStoreStats.UploadStatsFields.REFRESH_TIME_LAG_IN_MILLIS), - (int) segmentStatsTracker.refreshTimeLagMs + (int) segmentTransferStats.refreshTimeLagMs ); assertEquals( segmentUploads.get(RemoteStoreStats.UploadStatsFields.REFRESH_LAG), - (int) (segmentStatsTracker.localRefreshNumber - segmentStatsTracker.remoteRefreshNumber) + (int) (segmentTransferStats.localRefreshNumber - segmentTransferStats.remoteRefreshNumber) ); - assertEquals(segmentUploads.get(RemoteStoreStats.UploadStatsFields.BYTES_LAG), (int) segmentStatsTracker.bytesLag); + assertEquals(segmentUploads.get(RemoteStoreStats.UploadStatsFields.BYTES_LAG), (int) segmentTransferStats.bytesLag); assertEquals( segmentUploads.get(RemoteStoreStats.UploadStatsFields.BACKPRESSURE_REJECTION_COUNT), - (int) segmentStatsTracker.rejectionCount + (int) segmentTransferStats.rejectionCount ); assertEquals( segmentUploads.get(RemoteStoreStats.UploadStatsFields.CONSECUTIVE_FAILURE_COUNT), - (int) segmentStatsTracker.consecutiveFailuresCount + (int) segmentTransferStats.consecutiveFailuresCount ); assertEquals( - ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS_IN_BYTES)).get( - RemoteStoreStats.SubFields.STARTED + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_SIZE)).get( + RemoteStoreStats.SubFields.STARTED_BYTES ), - (int) segmentStatsTracker.uploadBytesStarted + (int) segmentTransferStats.uploadBytesStarted ); assertEquals( - ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS_IN_BYTES)).get( - RemoteStoreStats.SubFields.SUCCEEDED + ((Map) 
segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_SIZE)).get( + RemoteStoreStats.SubFields.SUCCEEDED_BYTES ), - (int) segmentStatsTracker.uploadBytesSucceeded + (int) segmentTransferStats.uploadBytesSucceeded ); assertEquals( - ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS_IN_BYTES)).get( - RemoteStoreStats.SubFields.FAILED + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_SIZE)).get( + RemoteStoreStats.SubFields.FAILED_BYTES ), - (int) segmentStatsTracker.uploadBytesFailed + (int) segmentTransferStats.uploadBytesFailed ); assertEquals( ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.REMOTE_REFRESH_SIZE_IN_BYTES)).get( RemoteStoreStats.SubFields.MOVING_AVG ), - segmentStatsTracker.uploadBytesMovingAverage + segmentTransferStats.uploadBytesMovingAverage ); assertEquals( ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.REMOTE_REFRESH_SIZE_IN_BYTES)).get( RemoteStoreStats.SubFields.LAST_SUCCESSFUL ), - (int) segmentStatsTracker.lastSuccessfulRemoteRefreshBytes + (int) segmentTransferStats.lastSuccessfulRemoteRefreshBytes ); assertEquals( - ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.UPLOAD_LATENCY_IN_BYTES_PER_SEC)).get( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.UPLOAD_SPEED_IN_BYTES_PER_SEC)).get( RemoteStoreStats.SubFields.MOVING_AVG ), - segmentStatsTracker.uploadBytesPerSecMovingAverage + segmentTransferStats.uploadBytesPerSecMovingAverage ); assertEquals( - ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_SYNCS_TO_REMOTE)).get( - RemoteStoreStats.SubFields.STARTED - ), - (int) segmentStatsTracker.totalUploadsStarted + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS)).get(RemoteStoreStats.SubFields.STARTED), + (int) segmentTransferStats.totalUploadsStarted ); assertEquals( - ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_SYNCS_TO_REMOTE)).get( - RemoteStoreStats.SubFields.SUCCEEDED - ), - (int) segmentStatsTracker.totalUploadsSucceeded + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS)).get(RemoteStoreStats.SubFields.SUCCEEDED), + (int) segmentTransferStats.totalUploadsSucceeded ); assertEquals( - ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_SYNCS_TO_REMOTE)).get(RemoteStoreStats.SubFields.FAILED), - (int) segmentStatsTracker.totalUploadsFailed + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS)).get(RemoteStoreStats.SubFields.FAILED), + (int) segmentTransferStats.totalUploadsFailed ); assertEquals( ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.REMOTE_REFRESH_LATENCY_IN_MILLIS)).get( RemoteStoreStats.SubFields.MOVING_AVG ), - segmentStatsTracker.uploadTimeMovingAverage + segmentTransferStats.uploadTimeMovingAverage ); } else { assertTrue(segmentUploads.isEmpty()); } + + // Compare Remote Translog Store stats + Map tlogStatsObj = (Map) statsObject.get(RemoteStoreStats.Fields.TRANSLOG); + Map tlogUploadStatsObj = (Map) tlogStatsObj.get(RemoteStoreStats.SubFields.UPLOAD); + if (translogTransferStats.totalUploadsStarted > 0) { + assertEquals( + translogTransferStats.lastSuccessfulUploadTimestamp, + Long.parseLong(tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.LAST_SUCCESSFUL_UPLOAD_TIMESTAMP).toString()) + ); + + assertEquals( + translogTransferStats.totalUploadsStarted, + Long.parseLong( + ((Map) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS)).get( + 
RemoteStoreStats.SubFields.STARTED + ).toString() + ) + ); + assertEquals( + translogTransferStats.totalUploadsSucceeded, + Long.parseLong( + ((Map) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS)).get( + RemoteStoreStats.SubFields.SUCCEEDED + ).toString() + ) + ); + assertEquals( + translogTransferStats.totalUploadsFailed, + Long.parseLong( + ((Map) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS)).get( + RemoteStoreStats.SubFields.FAILED + ).toString() + ) + ); + + assertEquals( + translogTransferStats.uploadBytesStarted, + Long.parseLong( + ((Map) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_SIZE)).get( + RemoteStoreStats.SubFields.STARTED_BYTES + ).toString() + ) + ); + assertEquals( + translogTransferStats.uploadBytesSucceeded, + Long.parseLong( + ((Map) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_SIZE)).get( + RemoteStoreStats.SubFields.SUCCEEDED_BYTES + ).toString() + ) + ); + assertEquals( + translogTransferStats.uploadBytesFailed, + Long.parseLong( + ((Map) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_SIZE)).get( + RemoteStoreStats.SubFields.FAILED_BYTES + ).toString() + ) + ); + + assertEquals( + translogTransferStats.totalUploadTimeInMillis, + Long.parseLong(tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOAD_TIME_IN_MILLIS).toString()) + ); + + assertEquals( + translogTransferStats.uploadBytesMovingAverage, + ((Map) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.UPLOAD_SIZE_IN_BYTES)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ) + ); + assertEquals( + translogTransferStats.uploadBytesPerSecMovingAverage, + ((Map) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.UPLOAD_SPEED_IN_BYTES_PER_SEC)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ) + ); + assertEquals( + translogTransferStats.uploadTimeMovingAverage, + ((Map) tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.UPLOAD_TIME_IN_MILLIS)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ) + ); + } else { + assertNull(tlogUploadStatsObj.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS)); + } + + Map tlogDownloadStatsObj = (Map) tlogStatsObj.get(RemoteStoreStats.SubFields.DOWNLOAD); + if (translogTransferStats.totalDownloadsSucceeded > 0) { + assertEquals( + translogTransferStats.lastSuccessfulDownloadTimestamp, + Long.parseLong(tlogDownloadStatsObj.get(RemoteStoreStats.DownloadStatsFields.LAST_SUCCESSFUL_DOWNLOAD_TIMESTAMP).toString()) + ); + assertEquals( + translogTransferStats.totalDownloadsSucceeded, + Long.parseLong( + ((Map) tlogDownloadStatsObj.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOADS)).get( + RemoteStoreStats.SubFields.SUCCEEDED + ).toString() + ) + ); + assertEquals( + translogTransferStats.downloadBytesSucceeded, + Long.parseLong( + ((Map) tlogDownloadStatsObj.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOAD_SIZE)).get( + RemoteStoreStats.SubFields.SUCCEEDED_BYTES + ).toString() + ) + ); + assertEquals( + translogTransferStats.totalDownloadTimeInMillis, + Long.parseLong(tlogDownloadStatsObj.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOAD_TIME_IN_MILLIS).toString()) + ); + + assertEquals( + translogTransferStats.downloadBytesMovingAverage, + ((Map) tlogDownloadStatsObj.get(RemoteStoreStats.DownloadStatsFields.DOWNLOAD_SIZE_IN_BYTES)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ) + ); + assertEquals( + translogTransferStats.downloadBytesPerSecMovingAverage, + ((Map) 
tlogDownloadStatsObj.get(RemoteStoreStats.DownloadStatsFields.DOWNLOAD_SPEED_IN_BYTES_PER_SEC)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ) + ); + assertEquals( + translogTransferStats.downloadTimeMovingAverage, + ((Map) tlogDownloadStatsObj.get(RemoteStoreStats.DownloadStatsFields.DOWNLOAD_TIME_IN_MILLIS)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ) + ); + } else { + assertNull(tlogDownloadStatsObj.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOAD_SIZE)); + } } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTests.java index 53a3b8e26a902..1c78539a00a07 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTests.java @@ -17,6 +17,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -25,10 +26,12 @@ import java.util.Map; import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.compareStatsResponse; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createEmptyTranslogStats; import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createShardRouting; import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForNewPrimary; import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForNewReplica; import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForRemoteStoreRestoredPrimary; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createTranslogStats; import static org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS; public class RemoteStoreStatsTests extends OpenSearchTestCase { @@ -49,70 +52,79 @@ public void tearDown() throws Exception { } public void testXContentBuilderWithPrimaryShard() throws IOException { - RemoteSegmentTransferTracker.Stats segmentStats = createStatsForNewPrimary(shardId); + RemoteSegmentTransferTracker.Stats segmentTransferStats = createStatsForNewPrimary(shardId); + RemoteTranslogTransferTracker.Stats translogTransferStats = createTranslogStats(shardId); ShardRouting routing = createShardRouting(shardId, true); - RemoteStoreStats stats = new RemoteStoreStats(segmentStats, routing); + RemoteStoreStats stats = new RemoteStoreStats(segmentTransferStats, translogTransferStats, routing); XContentBuilder builder = XContentFactory.jsonBuilder(); stats.toXContent(builder, EMPTY_PARAMS); Map jsonObject = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); - compareStatsResponse(jsonObject, segmentStats, routing); + compareStatsResponse(jsonObject, segmentTransferStats, translogTransferStats, routing); } public void testXContentBuilderWithReplicaShard() throws IOException { - RemoteSegmentTransferTracker.Stats segmentStats = createStatsForNewReplica(shardId); + 
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTests.java
index 53a3b8e26a902..1c78539a00a07 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTests.java
@@ -17,6 +17,7 @@
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.index.remote.RemoteSegmentTransferTracker;
+import org.opensearch.index.remote.RemoteTranslogTransferTracker;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
@@ -25,10 +26,12 @@
 import java.util.Map;
 
 import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.compareStatsResponse;
+import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createEmptyTranslogStats;
 import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createShardRouting;
 import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForNewPrimary;
 import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForNewReplica;
 import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForRemoteStoreRestoredPrimary;
+import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createTranslogStats;
 import static org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS;
 
 public class RemoteStoreStatsTests extends OpenSearchTestCase {
@@ -49,70 +52,79 @@ public void tearDown() throws Exception {
     }
 
     public void testXContentBuilderWithPrimaryShard() throws IOException {
-        RemoteSegmentTransferTracker.Stats segmentStats = createStatsForNewPrimary(shardId);
+        RemoteSegmentTransferTracker.Stats segmentTransferStats = createStatsForNewPrimary(shardId);
+        RemoteTranslogTransferTracker.Stats translogTransferStats = createTranslogStats(shardId);
         ShardRouting routing = createShardRouting(shardId, true);
-        RemoteStoreStats stats = new RemoteStoreStats(segmentStats, routing);
+        RemoteStoreStats stats = new RemoteStoreStats(segmentTransferStats, translogTransferStats, routing);
 
         XContentBuilder builder = XContentFactory.jsonBuilder();
         stats.toXContent(builder, EMPTY_PARAMS);
         Map<String, Object> jsonObject = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2();
-        compareStatsResponse(jsonObject, segmentStats, routing);
+        compareStatsResponse(jsonObject, segmentTransferStats, translogTransferStats, routing);
     }
 
     public void testXContentBuilderWithReplicaShard() throws IOException {
-        RemoteSegmentTransferTracker.Stats segmentStats = createStatsForNewReplica(shardId);
+        RemoteSegmentTransferTracker.Stats segmentTransferStats = createStatsForNewReplica(shardId);
+        RemoteTranslogTransferTracker.Stats translogTransferStats = createEmptyTranslogStats(shardId);
         ShardRouting routing = createShardRouting(shardId, false);
-        RemoteStoreStats stats = new RemoteStoreStats(segmentStats, routing);
+        RemoteStoreStats stats = new RemoteStoreStats(segmentTransferStats, translogTransferStats, routing);
 
         XContentBuilder builder = XContentFactory.jsonBuilder();
         stats.toXContent(builder, EMPTY_PARAMS);
         Map<String, Object> jsonObject = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2();
-        compareStatsResponse(jsonObject, segmentStats, routing);
+        compareStatsResponse(jsonObject, segmentTransferStats, translogTransferStats, routing);
     }
 
     public void testXContentBuilderWithRemoteStoreRestoredShard() throws IOException {
-        RemoteSegmentTransferTracker.Stats segmentStats = createStatsForRemoteStoreRestoredPrimary(shardId);
+        RemoteSegmentTransferTracker.Stats segmentTransferStats = createStatsForRemoteStoreRestoredPrimary(shardId);
+        RemoteTranslogTransferTracker.Stats translogTransferStats = createTranslogStats(shardId);
         ShardRouting routing = createShardRouting(shardId, true);
-        RemoteStoreStats stats = new RemoteStoreStats(segmentStats, routing);
+        RemoteStoreStats stats = new RemoteStoreStats(segmentTransferStats, translogTransferStats, routing);
 
         XContentBuilder builder = XContentFactory.jsonBuilder();
         stats.toXContent(builder, EMPTY_PARAMS);
         Map<String, Object> jsonObject = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2();
-        compareStatsResponse(jsonObject, segmentStats, routing);
+        compareStatsResponse(jsonObject, segmentTransferStats, translogTransferStats, routing);
     }
 
     public void testSerializationForPrimaryShard() throws Exception {
-        RemoteSegmentTransferTracker.Stats segmentStats = createStatsForNewPrimary(shardId);
-        RemoteStoreStats stats = new RemoteStoreStats(segmentStats, createShardRouting(shardId, true));
+        RemoteSegmentTransferTracker.Stats segmentTransferStats = createStatsForNewPrimary(shardId);
+        RemoteTranslogTransferTracker.Stats translogTransferStats = createTranslogStats(shardId);
+        RemoteStoreStats stats = new RemoteStoreStats(segmentTransferStats, translogTransferStats, createShardRouting(shardId, true));
         try (BytesStreamOutput out = new BytesStreamOutput()) {
             stats.writeTo(out);
             try (StreamInput in = out.bytes().streamInput()) {
                 RemoteStoreStats deserializedStats = new RemoteStoreStats(in);
                 assertEquals(stats.getSegmentStats(), deserializedStats.getSegmentStats());
+                assertEquals(stats.getTranslogStats(), deserializedStats.getTranslogStats());
             }
         }
     }
 
     public void testSerializationForReplicaShard() throws Exception {
         RemoteSegmentTransferTracker.Stats replicaShardStats = createStatsForNewReplica(shardId);
-        RemoteStoreStats stats = new RemoteStoreStats(replicaShardStats, createShardRouting(shardId, false));
+        RemoteTranslogTransferTracker.Stats translogTransferStats = createEmptyTranslogStats(shardId);
+        RemoteStoreStats stats = new RemoteStoreStats(replicaShardStats, translogTransferStats, createShardRouting(shardId, false));
         try (BytesStreamOutput out = new BytesStreamOutput()) {
             stats.writeTo(out);
             try (StreamInput in = out.bytes().streamInput()) {
                 RemoteStoreStats deserializedStats = new RemoteStoreStats(in);
                 assertEquals(stats.getSegmentStats(), deserializedStats.getSegmentStats());
+                assertEquals(stats.getTranslogStats(), deserializedStats.getTranslogStats());
             }
         }
     }
 
     public void testSerializationForRemoteStoreRestoredPrimaryShard() throws Exception {
         RemoteSegmentTransferTracker.Stats primaryShardStats = createStatsForRemoteStoreRestoredPrimary(shardId);
-        RemoteStoreStats stats = new RemoteStoreStats(primaryShardStats, createShardRouting(shardId, true));
+        RemoteTranslogTransferTracker.Stats translogTransferStats = createTranslogStats(shardId);
+        RemoteStoreStats stats = new RemoteStoreStats(primaryShardStats, translogTransferStats, createShardRouting(shardId, true));
         try (BytesStreamOutput out = new BytesStreamOutput()) {
             stats.writeTo(out);
             try (StreamInput in = out.bytes().streamInput()) {
                 RemoteStoreStats deserializedStats = new RemoteStoreStats(in);
                 assertEquals(stats.getSegmentStats(), deserializedStats.getSegmentStats());
+                assertEquals(stats.getTranslogStats(), deserializedStats.getTranslogStats());
             }
         }
     }
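Each serialization test above is the standard OpenSearch Writeable round-trip: write the object to a BytesStreamOutput, reconstruct it from the resulting StreamInput, and compare. Condensed to the pattern itself (using the same classes the tests use; try-with-resources closes both streams):

RemoteStoreStats stats = new RemoteStoreStats(segmentTransferStats, translogTransferStats, routing);
try (BytesStreamOutput out = new BytesStreamOutput()) {
    stats.writeTo(out);                                              // serialize
    try (StreamInput in = out.bytes().streamInput()) {
        RemoteStoreStats deserialized = new RemoteStoreStats(in);    // deserialize
        assertEquals(stats.getSegmentStats(), deserialized.getSegmentStats());
        assertEquals(stats.getTranslogStats(), deserialized.getTranslogStats());
    }
}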
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java
index c34f55d62fe89..ed73c2ef6ace5 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java
@@ -23,16 +23,15 @@
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.index.Index;
 import org.opensearch.index.IndexService;
 import org.opensearch.index.IndexSettings;
 import org.opensearch.index.remote.RemoteSegmentTransferTracker;
-import org.opensearch.index.remote.RemoteStorePressureService;
+import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory;
 import org.opensearch.index.shard.IndexShardTestCase;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.replication.common.ReplicationType;
-import org.opensearch.test.FeatureFlagSetter;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.transport.MockTransport;
 import org.opensearch.transport.TransportService;
@@ -53,7 +52,7 @@ public class TransportRemoteStoreStatsActionTests extends IndexShardTestCase {
 
     private IndicesService indicesService;
-    private RemoteStorePressureService pressureService;
+    private RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory;
     private IndexMetadata remoteStoreIndexMetadata;
     private TransportService transportService;
     private ClusterService clusterService;
@@ -67,7 +66,7 @@ public void setUp() throws Exception {
         indicesService = mock(IndicesService.class);
         IndexService indexService = mock(IndexService.class);
         clusterService = mock(ClusterService.class);
-        pressureService = mock(RemoteStorePressureService.class);
+        remoteStoreStatsTrackerFactory = mock(RemoteStoreStatsTrackerFactory.class);
         MockTransport mockTransport = new MockTransport();
         localNode = new DiscoveryNode("node0", buildNewFakeTransportAddress(), Version.CURRENT);
         remoteStoreIndexMetadata = IndexMetadata.builder(INDEX.getName())
@@ -87,10 +86,11 @@ public void setUp() throws Exception {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> localNode,
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
 
-        when(pressureService.getRemoteRefreshSegmentTracker(any())).thenReturn(mock(RemoteSegmentTransferTracker.class));
+        when(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(any())).thenReturn(mock(RemoteSegmentTransferTracker.class));
         when(indicesService.indexService(INDEX)).thenReturn(indexService);
         when(indexService.getIndexSettings()).thenReturn(new IndexSettings(remoteStoreIndexMetadata, Settings.EMPTY));
         statsAction = new TransportRemoteStoreStatsAction(
@@ -99,7 +99,7 @@ public void setUp() throws Exception {
             indicesService,
             mock(ActionFilters.class),
             mock(IndexNameExpressionResolver.class),
-            pressureService
+            remoteStoreStatsTrackerFactory
         );
     }
@@ -113,7 +113,6 @@ public void tearDown() throws Exception {
     }
 
     public void testAllShardCopies() throws Exception {
-        FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE);
         RoutingTable routingTable = RoutingTable.builder().addAsNew(remoteStoreIndexMetadata).build();
         Metadata metadata = Metadata.builder().put(remoteStoreIndexMetadata, false).build();
         ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).routingTable(routingTable).build();
@@ -133,7 +132,6 @@ public void testAllShardCopies() throws Exception {
     }
 
     public void testOnlyLocalShards() throws Exception {
-        FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE);
         String[] concreteIndices = new String[] { INDEX.getName() };
         RoutingTable routingTable = spy(RoutingTable.builder().addAsNew(remoteStoreIndexMetadata).build());
         doReturn(new PlainShardsIterator(routingTable.allShards(INDEX.getName()).stream().map(Mockito::spy).collect(Collectors.toList())))
@@ -161,7 +159,6 @@ public void testOnlyLocalShards() throws Exception {
     }
 
     public void testOnlyRemoteStoreEnabledShardCopies() throws Exception {
-        FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE);
         Index NEW_INDEX = new Index("newIndex", "newUUID");
         IndexMetadata indexMetadataWithoutRemoteStore = IndexMetadata.builder(NEW_INDEX.getName())
             .settings(
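The setUp() change swaps which collaborator is stubbed, but the Mockito pattern is unchanged: mock the factory, then stub its lookup so every shard resolves to a mocked tracker. In isolation, using the same classes and matcher as above:

RemoteStoreStatsTrackerFactory factory = mock(RemoteStoreStatsTrackerFactory.class);
// any() matches every argument, so all shards share one mocked tracker
when(factory.getRemoteSegmentTransferTracker(any())).thenReturn(mock(RemoteSegmentTransferTracker.class));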
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java
index 371f326617b61..ef26bc225b0c7 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java
@@ -62,6 +62,7 @@
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.shard.ReplicationGroup;
 import org.opensearch.indices.IndicesService;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.CapturingTransport;
 import org.opensearch.threadpool.TestThreadPool;
@@ -139,7 +140,8 @@ public void setUp() throws Exception {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java
index 8f4e0e3b75e32..2d9ec2b6d3c02 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexActionTests.java
@@ -46,6 +46,7 @@
 import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.index.Index;
 import org.opensearch.indices.IndicesService;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchSingleNodeTestCase;
 import org.opensearch.test.transport.CapturingTransport;
 import org.opensearch.threadpool.TestThreadPool;
@@ -85,7 +86,8 @@ public void setUp() throws Exception {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             boundAddress -> clusterService.localNode(),
             null,
-            emptySet()
+            emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java
index 67846efab2af8..d35c821b41aa0 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/segments/IndicesSegmentsRequestTests.java
@@ -34,7 +34,7 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.index.MergePolicyConfig;
+import org.opensearch.index.MergePolicyProvider;
 import org.opensearch.indices.IndexClosedException;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.test.InternalSettingsPlugin;
@@ -56,7 +56,7 @@ protected Collection<Class<? extends Plugin>> getPlugins() {
     public void setupIndex() {
         Settings settings = Settings.builder()
             // don't allow any merges so that the num docs is the expected segments
-            .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
+            .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false)
             .build();
         createIndex("test", settings);
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsActionTests.java
index d8e0f96292e27..f2b6688716e70 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsActionTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/settings/get/GetSettingsActionTests.java
@@ -45,6 +45,7 @@
 import org.opensearch.common.util.concurrent.ThreadContext;
 import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.index.Index;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.CapturingTransport;
 import org.opensearch.threadpool.TestThreadPool;
@@ -108,7 +109,8 @@ public void setUp() throws Exception {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             boundAddress -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
index 9ef0f85893fc8..cf7080ab2fc06 100644
--- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java
@@ -54,6 +54,7 @@
 import org.opensearch.index.VersionType;
 import org.opensearch.indices.SystemIndices;
 import org.opensearch.tasks.Task;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.VersionUtils;
 import org.opensearch.threadpool.ThreadPool;
@@ -154,7 +155,9 @@ private void indicesThatCannotBeCreatedTestCase(
                 Settings.EMPTY,
                 new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null)
             ),
-            new SystemIndices(emptyMap())
+            null,
+            new SystemIndices(emptyMap()),
+            NoopTracer.INSTANCE
         ) {
             @Override
             void executeBulk(
diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java
index 5410d6a88a5b9..141c630b94020 100644
--- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionIngestTests.java
@@ -70,6 +70,7 @@
 import org.opensearch.indices.SystemIndices;
 import org.opensearch.ingest.IngestService;
 import org.opensearch.tasks.Task;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.VersionUtils;
 import org.opensearch.threadpool.ThreadPool;
@@ -171,7 +172,9 @@ class TestTransportBulkAction extends TransportBulkAction {
                     SETTINGS,
                     new ClusterService(SETTINGS, new ClusterSettings(SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null)
                 ),
-                new SystemIndices(emptyMap())
+                null,
+                new SystemIndices(emptyMap()),
+                NoopTracer.INSTANCE
             );
         }
diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java
index 1229584fa99ac..6bbd740df7f9c 100644
--- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTests.java
@@ -60,8 +60,10 @@
 import org.opensearch.index.IndexNotFoundException;
 import org.opensearch.index.IndexingPressureService;
 import org.opensearch.index.VersionType;
+import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.SystemIndexDescriptor;
 import org.opensearch.indices.SystemIndices;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.VersionUtils;
 import org.opensearch.test.transport.CapturingTransport;
@@ -87,6 +89,7 @@
 import static org.opensearch.test.ClusterServiceUtils.createClusterService;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
+import static org.mockito.Mockito.mock;
 
 public class TransportBulkActionTests extends OpenSearchTestCase {
@@ -114,7 +117,9 @@ class TestTransportBulkAction extends TransportBulkAction {
                 new Resolver(),
                 new AutoCreateIndex(Settings.EMPTY, clusterService.getClusterSettings(), new Resolver(), new SystemIndices(emptyMap())),
                 new IndexingPressureService(Settings.EMPTY, clusterService),
-                new SystemIndices(emptyMap())
+                mock(IndicesService.class),
+                new SystemIndices(emptyMap()),
+                NoopTracer.INSTANCE
             );
         }
@@ -153,7 +158,8 @@ public void setUp() throws Exception {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             boundAddress -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
        );
        transportService.start();
        transportService.acceptIncomingRequests();
diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java
index b9dca5f2573f3..9d5b4430ea395 100644
--- a/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/TransportBulkActionTookTests.java
@@ -57,6 +57,7 @@
 import org.opensearch.index.IndexingPressureService;
 import org.opensearch.indices.SystemIndices;
 import org.opensearch.tasks.Task;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.VersionUtils;
 import org.opensearch.test.transport.CapturingTransport;
@@ -125,7 +126,8 @@ private TransportBulkAction createAction(boolean controlled, AtomicLong expected
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             boundAddress -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
@@ -278,8 +280,10 @@ static class TestTransportBulkAction extends TransportBulkAction {
                 indexNameExpressionResolver,
                 autoCreateIndex,
                 new IndexingPressureService(Settings.EMPTY, clusterService),
+                null,
                 new SystemIndices(emptyMap()),
-                relativeTimeProvider
+                relativeTimeProvider,
+                NoopTracer.INSTANCE
             );
         }
diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java
index fe0fdd07025d9..b325cfa197933 100644
--- a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java
+++ b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java
@@ -88,6 +88,7 @@
 import org.opensearch.index.translog.Translog;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.SystemIndices;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.threadpool.ThreadPool.Names;
@@ -1074,7 +1075,8 @@ public void testHandlePrimaryTermValidationRequestWithDifferentAllocationId() {
             mock(IndexingPressureService.class),
             mock(SegmentReplicationPressureService.class),
             mock(RemoteStorePressureService.class),
-            mock(SystemIndices.class)
+            mock(SystemIndices.class),
+            NoopTracer.INSTANCE
         );
         action.handlePrimaryTermValidationRequest(
             new TransportShardBulkAction.PrimaryTermValidationRequest(aId + "-1", 1, shardId),
@@ -1105,7 +1107,8 @@ public void testHandlePrimaryTermValidationRequestWithOlderPrimaryTerm() {
             mock(IndexingPressureService.class),
             mock(SegmentReplicationPressureService.class),
             mock(RemoteStorePressureService.class),
-            mock(SystemIndices.class)
+            mock(SystemIndices.class),
+            NoopTracer.INSTANCE
         );
         action.handlePrimaryTermValidationRequest(
             new TransportShardBulkAction.PrimaryTermValidationRequest(aId, 1, shardId),
@@ -1136,7 +1139,8 @@ public void testHandlePrimaryTermValidationRequestSuccess() {
             mock(IndexingPressureService.class),
             mock(SegmentReplicationPressureService.class),
             mock(RemoteStorePressureService.class),
-            mock(SystemIndices.class)
+            mock(SystemIndices.class),
+            NoopTracer.INSTANCE
         );
         action.handlePrimaryTermValidationRequest(
            new TransportShardBulkAction.PrimaryTermValidationRequest(aId, 1, shardId),
@@ -1178,7 +1182,8 @@ private TransportShardBulkAction createAction() {
             mock(IndexingPressureService.class),
             mock(SegmentReplicationPressureService.class),
             mock(RemoteStorePressureService.class),
-            mock(SystemIndices.class)
+            mock(SystemIndices.class),
+            NoopTracer.INSTANCE
         );
     }
diff --git a/server/src/test/java/org/opensearch/action/get/TransportGetActionTests.java b/server/src/test/java/org/opensearch/action/get/TransportGetActionTests.java
index 2eca49fb3032f..9565e219d1a78 100644
--- a/server/src/test/java/org/opensearch/action/get/TransportGetActionTests.java
+++ b/server/src/test/java/org/opensearch/action/get/TransportGetActionTests.java
@@ -67,24 +67,24 @@ private static ClusterState clusterState(ReplicationType replicationType) {
 
     public void testShouldForcePrimaryRouting() {
-        ClusterState clusterState = clusterState(ReplicationType.SEGMENT);
+        Metadata metadata = clusterState(ReplicationType.SEGMENT).getMetadata();
 
         // should return false since preference is set for request
-        assertFalse(TransportGetAction.shouldForcePrimaryRouting(clusterState, true, Preference.REPLICA.type(), "index1"));
+        assertFalse(TransportGetAction.shouldForcePrimaryRouting(metadata, true, Preference.REPLICA.type(), "index1"));
 
         // should return false since request is not realtime
-        assertFalse(TransportGetAction.shouldForcePrimaryRouting(clusterState, false, null, "index1"));
+        assertFalse(TransportGetAction.shouldForcePrimaryRouting(metadata, false, null, "index1"));
 
         // should return true since segment replication is enabled
-        assertTrue(TransportGetAction.shouldForcePrimaryRouting(clusterState, true, null, "index1"));
+        assertTrue(TransportGetAction.shouldForcePrimaryRouting(metadata, true, null, "index1"));
 
         // should return false since index doesn't exist
-        assertFalse(TransportGetAction.shouldForcePrimaryRouting(clusterState, true, null, "index3"));
+        assertFalse(TransportGetAction.shouldForcePrimaryRouting(metadata, true, null, "index3"));
 
-        clusterState = clusterState(ReplicationType.DOCUMENT);
+        metadata = clusterState(ReplicationType.DOCUMENT).getMetadata();
 
         // should fail since document replication enabled
-        assertFalse(TransportGetAction.shouldForcePrimaryRouting(clusterState, true, null, "index1"));
+        assertFalse(TransportGetAction.shouldForcePrimaryRouting(metadata, true, null, "index1"));
     }
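The four assertions encode a short-circuit decision: an explicit preference, a non-realtime request, or a missing index each force the result to false before the replication type is even consulted. A hedged sketch of that shape, with a hypothetical helper rather than the actual TransportGetAction implementation:

static boolean shouldForcePrimary(boolean realtime, String preference, boolean indexExists, boolean segmentReplication) {
    if (!realtime || preference != null || !indexExists) {
        return false;  // preference set, non-realtime, or unknown index
    }
    return segmentReplication;  // only segment-replicated indices force primary routing
}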
diff --git a/server/src/test/java/org/opensearch/action/get/TransportMultiGetActionTests.java b/server/src/test/java/org/opensearch/action/get/TransportMultiGetActionTests.java
index c9f40e0acc56c..52443e695e014 100644
--- a/server/src/test/java/org/opensearch/action/get/TransportMultiGetActionTests.java
+++ b/server/src/test/java/org/opensearch/action/get/TransportMultiGetActionTests.java
@@ -44,6 +44,7 @@
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.routing.OperationRouting;
+import org.opensearch.cluster.routing.Preference;
 import org.opensearch.cluster.routing.ShardIterator;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.settings.Settings;
@@ -58,8 +59,10 @@
 import org.opensearch.core.tasks.TaskId;
 import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.indices.IndicesService;
+import org.opensearch.indices.replication.common.ReplicationType;
 import org.opensearch.tasks.Task;
 import org.opensearch.tasks.TaskManager;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
@@ -68,6 +71,7 @@
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
+import java.io.IOException;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -91,32 +95,8 @@ public class TransportMultiGetActionTests extends OpenSearchTestCase {
     private static TransportMultiGetAction transportAction;
     private static TransportShardMultiGetAction shardAction;
 
-    @BeforeClass
-    public static void beforeClass() throws Exception {
-        threadPool = new TestThreadPool(TransportMultiGetActionTests.class.getSimpleName());
-
-        transportService = new TransportService(
-            Settings.EMPTY,
-            mock(Transport.class),
-            threadPool,
-            TransportService.NOOP_TRANSPORT_INTERCEPTOR,
-            boundAddress -> DiscoveryNode.createLocal(
-                Settings.builder().put("node.name", "node1").build(),
-                boundAddress.publishAddress(),
-                randomBase64UUID()
-            ),
-            null,
-            emptySet()
-        ) {
-            @Override
-            public TaskManager getTaskManager() {
-                return taskManager;
-            }
-        };
-
-        final Index index1 = new Index("index1", randomBase64UUID());
-        final Index index2 = new Index("index2", randomBase64UUID());
-        final ClusterState clusterState = ClusterState.builder(new ClusterName(TransportMultiGetActionTests.class.getSimpleName()))
+    private static ClusterState clusterState(ReplicationType replicationType, Index index1, Index index2) throws IOException {
+        return ClusterState.builder(new ClusterName(TransportMultiGetActionTests.class.getSimpleName()))
             .metadata(
                 new Metadata.Builder().put(
                     new IndexMetadata.Builder(index1.getName()).settings(
@@ -124,6 +104,7 @@ public TaskManager getTaskManager() {
                             .put("index.version.created", Version.CURRENT)
                             .put("index.number_of_shards", 1)
                             .put("index.number_of_replicas", 1)
+                            .put(IndexMetadata.SETTING_REPLICATION_TYPE, replicationType)
                             .put(IndexMetadata.SETTING_INDEX_UUID, index1.getUUID())
                     )
                         .putMapping(
@@ -149,6 +130,7 @@ public TaskManager getTaskManager() {
                                 .put("index.version.created", Version.CURRENT)
                                 .put("index.number_of_shards", 1)
                                 .put("index.number_of_replicas", 1)
+                                .put(IndexMetadata.SETTING_REPLICATION_TYPE, replicationType)
                                 .put(IndexMetadata.SETTING_INDEX_UUID, index1.getUUID())
                         )
                             .putMapping(
@@ -170,6 +152,35 @@ public TaskManager getTaskManager() {
                 )
             )
             .build();
+    }
+
+    @BeforeClass
+    public static void beforeClass() throws Exception {
+        threadPool = new TestThreadPool(TransportMultiGetActionTests.class.getSimpleName());
+
+        transportService = new TransportService(
+            Settings.EMPTY,
+            mock(Transport.class),
+            threadPool,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+            boundAddress -> DiscoveryNode.createLocal(
+                Settings.builder().put("node.name", "node1").build(),
+                boundAddress.publishAddress(),
+                randomBase64UUID()
+            ),
+            null,
+            emptySet(),
+            NoopTracer.INSTANCE
+        ) {
+            @Override
+            public TaskManager getTaskManager() {
+                return taskManager;
+            }
+        };
+
+        final Index index1 = new Index("index1", randomBase64UUID());
+        final Index index2 = new Index("index2", randomBase64UUID());
+        ClusterState clusterState = clusterState(randomBoolean() ? ReplicationType.SEGMENT : ReplicationType.DOCUMENT, index1, index2);
 
         final ShardIterator index1ShardIterator = mock(ShardIterator.class);
         when(index1ShardIterator.shardId()).thenReturn(new ShardId(index1, randomInt()));
@@ -285,6 +296,30 @@ protected void executeShardAction(
     }
 
+    public void testShouldForcePrimaryRouting() throws IOException {
+        final Index index1 = new Index("index1", randomBase64UUID());
+        final Index index2 = new Index("index2", randomBase64UUID());
+        Metadata metadata = clusterState(ReplicationType.SEGMENT, index1, index2).getMetadata();
+
+        // should return false since preference is set for request
+        assertFalse(TransportMultiGetAction.shouldForcePrimaryRouting(metadata, true, Preference.REPLICA.type(), "index1"));
+
+        // should return false since request is not realtime
+        assertFalse(TransportMultiGetAction.shouldForcePrimaryRouting(metadata, false, null, "index2"));
+
+        // should return true since segment replication is enabled
+        assertTrue(TransportMultiGetAction.shouldForcePrimaryRouting(metadata, true, null, "index1"));
+
+        // should return false since index doesn't exist
+        assertFalse(TransportMultiGetAction.shouldForcePrimaryRouting(metadata, true, null, "index3"));
+
+        metadata = clusterState(ReplicationType.DOCUMENT, index1, index2).getMetadata();
+
+        // should return false since document replication is enabled
+        assertFalse(TransportMultiGetAction.shouldForcePrimaryRouting(metadata, true, null, "index1"));
+
+    }
+
     private static Task createTask() {
         return new Task(
             randomLong(),
diff --git a/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java b/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java
index 705fb546a2fed..41f782e308785 100644
--- a/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java
+++ b/server/src/test/java/org/opensearch/action/ingest/SimulatePipelineRequestParsingTests.java
@@ -85,7 +85,7 @@ public void init() throws IOException {
         when(ingestService.getProcessorFactories()).thenReturn(registry);
     }
 
-    public void testParseUsingPipelineStore(boolean useExplicitType) throws Exception {
+    public void testParseUsingPipelineStore() throws Exception {
         int numDocs = randomIntBetween(1, 10);
 
         Map<String, Object> requestContent = new HashMap<>();
@@ -131,7 +131,7 @@ public void testParseUsingPipelineStore(boolean useExplicitType) throws Exception
         assertThat(actualRequest.getPipeline().getProcessors().size(), equalTo(1));
     }
 
-    public void innerTestParseWithProvidedPipeline() throws Exception {
+    public void testParseWithProvidedPipeline() throws Exception {
         int numDocs = randomIntBetween(1, 10);
 
         Map<String, Object> requestContent = new HashMap<>();
@@ -144,17 +144,29 @@ public void innerTestParseWithProvidedPipeline() throws Exception {
         List<IngestDocument.Metadata> fields = Arrays.asList(INDEX, ID, ROUTING, VERSION, VERSION_TYPE, IF_SEQ_NO, IF_PRIMARY_TERM);
         for (IngestDocument.Metadata field : fields) {
             if (field == VERSION) {
-                Long value = randomLong();
-                doc.put(field.getFieldName(), value);
-                expectedDoc.put(field.getFieldName(), value);
+                if (randomBoolean()) {
+                    Long value = randomLong();
+                    doc.put(field.getFieldName(), value);
+                    expectedDoc.put(field.getFieldName(), value);
+                } else {
+                    int value = randomIntBetween(1, 1000000);
+                    doc.put(field.getFieldName(), value);
+                    expectedDoc.put(field.getFieldName(), (long) value);
+                }
             } else if (field == VERSION_TYPE) {
                 String value = VersionType.toString(randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE));
                 doc.put(field.getFieldName(), value);
                 expectedDoc.put(field.getFieldName(), value);
             } else if (field == IF_SEQ_NO || field == IF_PRIMARY_TERM) {
-                Long value = randomNonNegativeLong();
-                doc.put(field.getFieldName(), value);
-                expectedDoc.put(field.getFieldName(), value);
+                if (randomBoolean()) {
+                    Long value = randomNonNegativeLong();
+                    doc.put(field.getFieldName(), value);
+                    expectedDoc.put(field.getFieldName(), value);
+                } else {
+                    int value = randomIntBetween(1, 1000000);
+                    doc.put(field.getFieldName(), value);
+                    expectedDoc.put(field.getFieldName(), (long) value);
+                }
             } else {
                 if (randomBoolean()) {
                     String value = randomAlphaOfLengthBetween(1, 10);
@@ -282,4 +294,40 @@ public void testNotValidDocs() {
         );
         assertThat(e3.getMessage(), containsString("required property is missing"));
     }
+
+    public void testNotValidMetadataFields() {
+        List<IngestDocument.Metadata> fields = Arrays.asList(VERSION, IF_SEQ_NO, IF_PRIMARY_TERM);
+        for (IngestDocument.Metadata field : fields) {
+            String metadataFieldName = field.getFieldName();
+            Map<String, Object> requestContent = new HashMap<>();
+            List<Map<String, Object>> docs = new ArrayList<>();
+            requestContent.put(Fields.DOCS, docs);
+            Map<String, Object> doc = new HashMap<>();
+            doc.put(metadataFieldName, randomAlphaOfLengthBetween(1, 10));
+            doc.put(Fields.SOURCE, Collections.singletonMap(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10)));
+            docs.add(doc);
+
+            Map<String, Object> pipelineConfig = new HashMap<>();
+            List<Map<String, Object>> processors = new ArrayList<>();
+            Map<String, Object> processorConfig = new HashMap<>();
+            List<Map<String, Object>> onFailureProcessors = new ArrayList<>();
+            int numOnFailureProcessors = randomIntBetween(0, 1);
+            for (int j = 0; j < numOnFailureProcessors; j++) {
+                onFailureProcessors.add(Collections.singletonMap("mock_processor", Collections.emptyMap()));
+            }
+            if (numOnFailureProcessors > 0) {
+                processorConfig.put("on_failure", onFailureProcessors);
+            }
+            processors.add(Collections.singletonMap("mock_processor", processorConfig));
+            pipelineConfig.put("processors", processors);
+
+            requestContent.put(Fields.PIPELINE, pipelineConfig);
+
+            assertThrows(
+                "Failed to parse parameter [" + metadataFieldName + "], only int or long is accepted",
+                IllegalArgumentException.class,
+                () -> SimulatePipelineRequest.parse(requestContent, false, ingestService)
+            );
+        }
+    }
 }
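testNotValidMetadataFields leans on the assertThrows(message, expectedType, executable) overload, where the executable is a lambda that must throw; the leading String is only the failure description, not a match on the exception message. Reduced to its essentials (hypothetical description text):

assertThrows(
    "expected an int or long value",                      // reason shown if nothing is thrown
    IllegalArgumentException.class,
    () -> SimulatePipelineRequest.parse(requestContent, false, ingestService)
);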
Settings.builder().put("node.name", "my-node").put(OVERRIDE_MAIN_RESPONSE_VERSION_KEY, true).build(); diff --git a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java index e88cc4c5b1d52..da87a0a967f53 100644 --- a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java @@ -64,6 +64,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -134,7 +135,8 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { new NetworkService(emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) ) { @@ -145,7 +147,8 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -200,7 +203,8 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { shardStateAction, new ActionFilters(new HashSet<>()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); assertThat(action.globalBlockLevel(), nullValue()); @@ -253,7 +257,8 @@ private TransportResyncReplicationAction createAction() { mock(ShardStateAction.class), new ActionFilters(new HashSet<>()), mock(IndexingPressureService.class), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); } } diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java index 705479ec21fc1..edac50813e191 100644 --- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java @@ -38,8 +38,12 @@ import org.opensearch.cluster.routing.GroupShardsIterator; import org.opensearch.common.UUIDs; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.set.Sets; import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.NoopCircuitBreaker; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.query.MatchAllQueryBuilder; @@ -51,7 +55,10 @@ import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import 
org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; import org.junit.After; import org.junit.Before; @@ -65,6 +72,7 @@ import java.util.UUID; import java.util.concurrent.CopyOnWriteArraySet; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; @@ -77,18 +85,21 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; public class AbstractSearchAsyncActionTests extends OpenSearchTestCase { private final List> resolvedNodes = new ArrayList<>(); private final Set releasedContexts = new CopyOnWriteArraySet<>(); private ExecutorService executor; + ThreadPool threadPool; @Before @Override public void setUp() throws Exception { super.setUp(); executor = Executors.newFixedThreadPool(1); + threadPool = new TestThreadPool(getClass().getName()); } @After @@ -97,6 +108,7 @@ public void tearDown() throws Exception { super.tearDown(); executor.shutdown(); assertTrue(executor.awaitTermination(1, TimeUnit.SECONDS)); + ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); } private AbstractSearchAsyncAction createAction( @@ -126,6 +138,7 @@ private AbstractSearchAsyncAction createAction( final AtomicLong expected, final SearchShardIterator... shards ) { + final Runnable runnable; final TransportSearchAction.SearchTimeProvider timeProvider; if (controlled) { @@ -161,7 +174,8 @@ private AbstractSearchAsyncAction createAction( null, results, request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { @Override protected SearchPhase getNextPhase(final SearchPhaseResults results, SearchPhaseContext context) { @@ -313,6 +327,53 @@ public void testSendSearchResponseDisallowPartialFailures() { assertEquals(requestIds, releasedContexts); } + public void testOnPhaseFailureAndVerifyListeners() { + SearchRequestStats testListener = new SearchRequestStats(); + + final List requestOperationListeners = new ArrayList<>(List.of(testListener)); + SearchQueryThenFetchAsyncAction action = createSearchQueryThenFetchAsyncAction(requestOperationListeners); + action.start(); + assertEquals(1, testListener.getPhaseCurrent(action.getSearchPhaseName())); + action.onPhaseFailure(new SearchPhase("test") { + @Override + public void run() { + + } + }, "message", null); + assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseTotal(action.getSearchPhaseName())); + + SearchDfsQueryThenFetchAsyncAction searchDfsQueryThenFetchAsyncAction = createSearchDfsQueryThenFetchAsyncAction( + requestOperationListeners + ); + searchDfsQueryThenFetchAsyncAction.start(); + assertEquals(1, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + searchDfsQueryThenFetchAsyncAction.onPhaseFailure(new SearchPhase("test") { + @Override + public void run() { + + } + }, "message", null); + assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseTotal(action.getSearchPhaseName())); + + FetchSearchPhase fetchPhase = createFetchSearchPhase(); + ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), randomInt()); + SearchShardIterator searchShardIterator = new SearchShardIterator(null, shardId, 
Collections.emptyList(), OriginalIndices.NONE); + searchShardIterator.resetAndSkip(); + action.skipShard(searchShardIterator); + action.executeNextPhase(action, fetchPhase); + assertEquals(1, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + action.onPhaseFailure(new SearchPhase("test") { + @Override + public void run() { + + } + }, "message", null); + assertEquals(0, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseTotal(fetchPhase.getSearchPhaseName())); + } + public void testOnPhaseFailure() { SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false); AtomicReference exception = new AtomicReference<>(); @@ -321,6 +382,7 @@ public void testOnPhaseFailure() { List> nodeLookups = new ArrayList<>(); ArraySearchPhaseResults phaseResults = phaseResults(requestIds, nodeLookups, 0); AbstractSearchAsyncAction action = createAction(searchRequest, phaseResults, listener, false, new AtomicLong()); + action.onPhaseFailure(new SearchPhase("test") { @Override public void run() { @@ -528,6 +590,224 @@ public void onFailure(Exception e) { assertThat(searchResponse.getSuccessfulShards(), equalTo(shards.length)); } + public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedException { + SearchRequestStats testListener = new SearchRequestStats(); + final List requestOperationListeners = new ArrayList<>(List.of(testListener)); + + long delay = (randomIntBetween(1, 5)); + delay = delay * 10; + + SearchQueryThenFetchAsyncAction action = createSearchQueryThenFetchAsyncAction(requestOperationListeners); + action.start(); + + // Verify queryPhase current metric + assertEquals(1, testListener.getPhaseCurrent(action.getSearchPhaseName())); + TimeUnit.MILLISECONDS.sleep(delay); + + FetchSearchPhase fetchPhase = createFetchSearchPhase(); + ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), randomInt()); + SearchShardIterator searchShardIterator = new SearchShardIterator(null, shardId, Collections.emptyList(), OriginalIndices.NONE); + searchShardIterator.resetAndSkip(); + action.skipShard(searchShardIterator); + action.executeNextPhase(action, fetchPhase); + + // Verify queryPhase total, current and latency metrics + assertEquals(0, testListener.getPhaseCurrent(action.getSearchPhaseName())); + assertThat(testListener.getPhaseMetric(action.getSearchPhaseName()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(action.getSearchPhaseName())); + + // Verify fetchPhase current metric + assertEquals(1, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + TimeUnit.MILLISECONDS.sleep(delay); + + ExpandSearchPhase expandPhase = createExpandSearchPhase(); + action.executeNextPhase(fetchPhase, expandPhase); + TimeUnit.MILLISECONDS.sleep(delay); + + // Verify fetchPhase total, current and latency metrics + assertThat(testListener.getPhaseMetric(fetchPhase.getSearchPhaseName()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(fetchPhase.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseCurrent(fetchPhase.getSearchPhaseName())); + + assertEquals(1, testListener.getPhaseCurrent(expandPhase.getSearchPhaseName())); + + action.executeNextPhase(expandPhase, fetchPhase); + + action.sendSearchResponse(mock(InternalSearchResponse.class), mock(String.valueOf(QuerySearchResult.class))); + assertThat(testListener.getPhaseMetric(expandPhase.getSearchPhaseName()), greaterThanOrEqualTo(delay)); + assertEquals(1, 
testListener.getPhaseTotal(expandPhase.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseCurrent(expandPhase.getSearchPhaseName())); + } + + public void testOnPhaseListenersWithDfsType() throws InterruptedException { + SearchRequestStats testListener = new SearchRequestStats(); + final List requestOperationListeners = new ArrayList<>(List.of(testListener)); + + SearchDfsQueryThenFetchAsyncAction searchDfsQueryThenFetchAsyncAction = createSearchDfsQueryThenFetchAsyncAction( + requestOperationListeners + ); + long delay = (randomIntBetween(1, 5)); + + FetchSearchPhase fetchPhase = createFetchSearchPhase(); + searchDfsQueryThenFetchAsyncAction.start(); + assertEquals(1, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + TimeUnit.MILLISECONDS.sleep(delay); + ShardId shardId = new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), randomInt()); + SearchShardIterator searchShardIterator = new SearchShardIterator(null, shardId, Collections.emptyList(), OriginalIndices.NONE); + searchShardIterator.resetAndSkip(); + + searchDfsQueryThenFetchAsyncAction.skipShard(searchShardIterator); + searchDfsQueryThenFetchAsyncAction.executeNextPhase(searchDfsQueryThenFetchAsyncAction, fetchPhase); + + assertThat(testListener.getPhaseMetric(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName()), greaterThanOrEqualTo(delay)); + assertEquals(1, testListener.getPhaseTotal(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + assertEquals(0, testListener.getPhaseCurrent(searchDfsQueryThenFetchAsyncAction.getSearchPhaseName())); + } + + private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAction( + List searchRequestOperationsListeners + ) { + SearchPhaseController controller = new SearchPhaseController( + writableRegistry(), + r -> InternalAggregationTestCase.emptyReduceContextBuilder() + ); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap()); + Executor executor = OpenSearchExecutors.newDirectExecutorService(); + SearchShardIterator shards = new SearchShardIterator(null, null, Collections.emptyList(), null); + GroupShardsIterator shardsIter = new GroupShardsIterator<>(List.of(shards)); + QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer( + searchRequest, + executor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + controller, + task.getProgressListener(), + writableRegistry(), + shardsIter.size(), + exc -> {} + ); + AtomicReference exception = new AtomicReference<>(); + ActionListener listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); + TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( + 0, + System.nanoTime(), + System::nanoTime + ); + return new SearchDfsQueryThenFetchAsyncAction( + logger, + null, + null, + null, + null, + null, + null, + executor, + resultConsumer, + searchRequest, + listener, + shardsIter, + timeProvider, + null, + task, + SearchResponse.Clusters.EMPTY, + new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger) + ); + } + + private SearchQueryThenFetchAsyncAction createSearchQueryThenFetchAsyncAction( + List searchRequestOperationsListeners + ) { + SearchPhaseController controller = new SearchPhaseController( + writableRegistry(), + r -> InternalAggregationTestCase.emptyReduceContextBuilder() + ); + 
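Both listener tests check the same three counters per phase: in-flight ("current"), cumulative total, and accumulated latency; a failure decrements current but never bumps total. A plain-Java sketch of that bookkeeping, independent of the actual SearchRequestStats internals (all names here are illustrative):

import java.util.concurrent.atomic.AtomicLong;

final class PhaseStatsSketch {
    private final AtomicLong current = new AtomicLong();
    private final AtomicLong total = new AtomicLong();
    private final AtomicLong timeMillis = new AtomicLong();

    void onPhaseStart() {
        current.incrementAndGet();        // phase is now in flight
    }

    void onPhaseEnd(long tookMillis) {
        current.decrementAndGet();
        total.incrementAndGet();          // only successful completions count
        timeMillis.addAndGet(tookMillis); // latency accumulates across requests
    }

    void onPhaseFailure() {
        current.decrementAndGet();        // failure leaves total and latency untouched
    }
}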
+    private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAction(
+        List<SearchRequestOperationsListener> searchRequestOperationsListeners
+    ) {
+        SearchPhaseController controller = new SearchPhaseController(
+            writableRegistry(),
+            r -> InternalAggregationTestCase.emptyReduceContextBuilder()
+        );
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
+        SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap());
+        Executor executor = OpenSearchExecutors.newDirectExecutorService();
+        SearchShardIterator shards = new SearchShardIterator(null, null, Collections.emptyList(), null);
+        GroupShardsIterator<SearchShardIterator> shardsIter = new GroupShardsIterator<>(List.of(shards));
+        QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer(
+            searchRequest,
+            executor,
+            new NoopCircuitBreaker(CircuitBreaker.REQUEST),
+            controller,
+            task.getProgressListener(),
+            writableRegistry(),
+            shardsIter.size(),
+            exc -> {}
+        );
+        AtomicReference<Exception> exception = new AtomicReference<>();
+        ActionListener<SearchResponse> listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set);
+        TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(
+            0,
+            System.nanoTime(),
+            System::nanoTime
+        );
+        return new SearchDfsQueryThenFetchAsyncAction(
+            logger,
+            null,
+            null,
+            null,
+            null,
+            null,
+            null,
+            executor,
+            resultConsumer,
+            searchRequest,
+            listener,
+            shardsIter,
+            timeProvider,
+            null,
+            task,
+            SearchResponse.Clusters.EMPTY,
+            new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger)
+        );
+    }
+
+    private SearchQueryThenFetchAsyncAction createSearchQueryThenFetchAsyncAction(
+        List<SearchRequestOperationsListener> searchRequestOperationsListeners
+    ) {
+        SearchPhaseController controller = new SearchPhaseController(
+            writableRegistry(),
+            r -> InternalAggregationTestCase.emptyReduceContextBuilder()
+        );
+        SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
+        SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap());
+        Executor executor = OpenSearchExecutors.newDirectExecutorService();
+        SearchShardIterator shards = new SearchShardIterator(null, null, Collections.emptyList(), null);
+        GroupShardsIterator<SearchShardIterator> shardsIter = new GroupShardsIterator<>(List.of(shards));
+        QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer(
+            searchRequest,
+            executor,
+            new NoopCircuitBreaker(CircuitBreaker.REQUEST),
+            controller,
+            task.getProgressListener(),
+            writableRegistry(),
+            shardsIter.size(),
+            exc -> {}
+        );
+        AtomicReference<Exception> exception = new AtomicReference<>();
+        ActionListener<SearchResponse> listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set);
+        TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(
+            0,
+            System.nanoTime(),
+            System::nanoTime
+        );
+        return new SearchQueryThenFetchAsyncAction(
+            logger,
+            null,
+            null,
+            null,
+            null,
+            null,
+            null,
+            executor,
+            resultConsumer,
+            searchRequest,
+            listener,
+            shardsIter,
+            timeProvider,
+            null,
+            task,
+            SearchResponse.Clusters.EMPTY,
+            new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger)
+        ) {
+            @Override
+            ShardSearchFailure[] buildShardFailures() {
+                return ShardSearchFailure.EMPTY_ARRAY;
+            }
+
+            @Override
+            public void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray<SearchPhaseResult> queryResults) {
+                start();
+            }
+        };
+    }
+
+    private FetchSearchPhase createFetchSearchPhase() {
+        SearchPhaseController controller = new SearchPhaseController(
+            writableRegistry(),
+            r -> InternalAggregationTestCase.emptyReduceContextBuilder()
+        );
+        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1);
+        QueryPhaseResultConsumer results = controller.newSearchPhaseResults(
+            OpenSearchExecutors.newDirectExecutorService(),
+            new NoopCircuitBreaker(CircuitBreaker.REQUEST),
+            SearchProgressListener.NOOP,
+            mockSearchPhaseContext.getRequest(),
+            1,
+            exc -> {}
+        );
+        return new FetchSearchPhase(
+            results,
+            controller,
+            null,
+            mockSearchPhaseContext,
+            (searchResponse, scrollId) -> new SearchPhase("test") {
+                @Override
+                public void run() {
+                    mockSearchPhaseContext.sendSearchResponse(searchResponse, null);
+                }
+            }
+        );
+    }
+
+    private ExpandSearchPhase createExpandSearchPhase() {
+        MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1);
+        InternalSearchResponse internalSearchResponse = new InternalSearchResponse(null, null, null, null, false, null, 1);
+        return new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, null);
+    }
+
     private static final class PhaseResult extends SearchPhaseResult {
         PhaseResult(ShardSearchContextId contextId) {
             this.contextId = contextId;
diff --git a/server/src/test/java/org/opensearch/action/search/BottomSortValuesCollectorTests.java b/server/src/test/java/org/opensearch/action/search/BottomSortValuesCollectorTests.java
index 3efcadbfb320d..8042a7e296869 100644
--- a/server/src/test/java/org/opensearch/action/search/BottomSortValuesCollectorTests.java
+++ b/server/src/test/java/org/opensearch/action/search/BottomSortValuesCollectorTests.java
@@ -46,7 +46,6 @@
 import java.time.ZoneId;
 import java.util.Arrays;
 
-import static org.opensearch.index.mapper.DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.apache.lucene.search.TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO;
@@ -136,7 +135,11 @@ public void testWithDates() {
         for (boolean reverse : new boolean[] { true, false }) {
             SortField[] sortFields = new SortField[] { new SortField("foo", SortField.Type.LONG, reverse) };
             DocValueFormat[] sortFormats = new DocValueFormat[] {
-                new DocValueFormat.DateTime(DEFAULT_DATE_TIME_FORMATTER, ZoneId.of("UTC"), DateFieldMapper.Resolution.MILLISECONDS) };
+                new DocValueFormat.DateTime(
+                    DateFieldMapper.getDefaultDateTimeFormatter(),
+                    ZoneId.of("UTC"),
+                    DateFieldMapper.Resolution.MILLISECONDS
+                ) };
             BottomSortValuesCollector collector = new BottomSortValuesCollector(3, sortFields);
             collector.consumeTopDocs(
                 createTopDocs(sortFields[0], 100, newDateArray("2017-06-01T12:18:20Z", "2018-04-03T15:10:27Z", "2013-06-01T13:10:20Z")),
@@ -170,7 +173,11 @@ public void testWithDateNanos() {
         for (boolean reverse : new boolean[] { true, false }) {
             SortField[] sortFields = new SortField[] { new SortField("foo", SortField.Type.LONG, reverse) };
             DocValueFormat[] sortFormats = new DocValueFormat[] {
-                new DocValueFormat.DateTime(DEFAULT_DATE_TIME_FORMATTER, ZoneId.of("UTC"), DateFieldMapper.Resolution.NANOSECONDS) };
+                new DocValueFormat.DateTime(
+                    DateFieldMapper.getDefaultDateTimeFormatter(),
+                    ZoneId.of("UTC"),
+                    DateFieldMapper.Resolution.NANOSECONDS
+                ) };
             BottomSortValuesCollector collector = new BottomSortValuesCollector(3, sortFields);
             collector.consumeTopDocs(
                 createTopDocs(sortFields[0], 100, newDateNanoArray("2017-06-01T12:18:20Z", "2018-04-03T15:10:27Z", "2013-06-01T13:10:20Z")),
@@ -242,7 +249,7 @@ private Object[] newBytesArray(String... values) {
 
     private Object[] newDateArray(String... values) {
         Long[] longs = new Long[values.length];
         for (int i = 0; i < values.length; i++) {
-            longs[i] = DEFAULT_DATE_TIME_FORMATTER.parseMillis(values[i]);
+            longs[i] = DateFieldMapper.getDefaultDateTimeFormatter().parseMillis(values[i]);
         }
         return longs;
     }
@@ -250,7 +257,7 @@ private Object[] newDateNanoArray(String... values) {
         Long[] longs = new Long[values.length];
         for (int i = 0; i < values.length; i++) {
-            longs[i] = DateUtils.toNanoSeconds(DEFAULT_DATE_TIME_FORMATTER.parseMillis(values[i]));
+            longs[i] = DateUtils.toNanoSeconds(DateFieldMapper.getDefaultDateTimeFormatter().parseMillis(values[i]));
         }
         return longs;
     }
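The switch from the shared DEFAULT_DATE_TIME_FORMATTER constant to the getDefaultDateTimeFormatter() accessor leaves call sites otherwise identical: parsing still yields epoch millis, with one extra conversion for nanosecond resolution. Both calls below appear verbatim in the hunks above:

long millis = DateFieldMapper.getDefaultDateTimeFormatter().parseMillis("2017-06-01T12:18:20Z");
long nanos = DateUtils.toNanoSeconds(millis);   // only needed for nanosecond-resolution fields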
values) { Long[] longs = new Long[values.length]; for (int i = 0; i < values.length; i++) { - longs[i] = DateUtils.toNanoSeconds(DEFAULT_DATE_TIME_FORMATTER.parseMillis(values[i])); + longs[i] = DateUtils.toNanoSeconds(DateFieldMapper.getDefaultDateTimeFormatter().parseMillis(values[i])); } return longs; } diff --git a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java index 45f00a8418d5c..43029fe57d5dd 100644 --- a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -136,7 +136,8 @@ public void run() throws IOException { latch.countDown(); } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ); canMatchPhase.start(); @@ -227,7 +228,8 @@ public void run() throws IOException { latch.countDown(); } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ); canMatchPhase.start(); @@ -317,7 +319,8 @@ public void sendCanMatch( null, new ArraySearchPhaseResults<>(iter.size()), randomIntBetween(1, 32), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { @Override @@ -344,7 +347,8 @@ protected void executePhaseOnShard( } } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ); canMatchPhase.start(); @@ -428,7 +432,8 @@ public void run() { latch.countDown(); } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ); canMatchPhase.start(); @@ -527,7 +532,8 @@ public void run() { latch.countDown(); } }, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ); canMatchPhase.start(); diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java index 897c2a6198eac..6cbe458a35ef8 100644 --- a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java +++ b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -32,6 +32,7 @@ import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -180,7 +181,7 @@ public void testUpdatePitAfterCreatePitSuccess() throws InterruptedException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -273,7 +274,7 @@ public void testUpdatePitAfterCreatePitFailure() throws InterruptedException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -338,8 +339,8 @@ public void onFailure(Exception e) { createListener.onFailure(new Exception("Exception occurred in phase 1")); latch.await(); assertEquals(0, updateNodesInvoked.size()); - /** - * cleanup is not called on create pit phase one failure + /* + cleanup is not called on create pit phase one failure */ assertEquals(0, deleteNodesInvoked.size()); } @@ -366,7 +367,7 @@ public void testUpdatePitFailureForNodeDrop() throws InterruptedException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -437,8 
+438,8 @@ public void onFailure(Exception e) { createListener.onResponse(searchResponse); latch.await(); assertEquals(3, updateNodesInvoked.size()); - /** - * check if cleanup is called for all nodes in case of update pit failure + /* + check if cleanup is called for all nodes in case of update pit failure */ assertEquals(3, deleteNodesInvoked.size()); } @@ -462,7 +463,7 @@ public void testUpdatePitFailureWhereAllNodesDown() throws InterruptedException Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -525,8 +526,8 @@ public void onFailure(Exception e) { createListener.onResponse(searchResponse); latch.await(); assertEquals(3, updateNodesInvoked.size()); - /** - * check if cleanup is called for all nodes in case of update pit failure + /* + check if cleanup is called for all nodes in case of update pit failure */ assertEquals(3, deleteNodesInvoked.size()); } diff --git a/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java index e078b4a467e91..b5e1050b968ee 100644 --- a/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/opensearch/action/search/MockSearchPhaseContext.java @@ -99,6 +99,11 @@ public SearchRequest getRequest() { return searchRequest; } + @Override + public SearchPhase getCurrentPhase() { + return null; + } + @Override public void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray<SearchPhaseResult> queryResults) { String scrollId = getRequest().scroll() != null ? TransportSearchHelper.buildScrollId(queryResults, Version.CURRENT) : null; diff --git a/server/src/test/java/org/opensearch/action/search/MultiSearchActionTookTests.java b/server/src/test/java/org/opensearch/action/search/MultiSearchActionTookTests.java index 173bee40d9ae8..94ba5b0a8768b 100644 --- a/server/src/test/java/org/opensearch/action/search/MultiSearchActionTookTests.java +++ b/server/src/test/java/org/opensearch/action/search/MultiSearchActionTookTests.java @@ -49,6 +49,7 @@ import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -153,7 +154,8 @@ private TransportMultiSearchAction createTransportMultiSearchAction(boolean cont TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ) { @Override public TaskManager getTaskManager() { diff --git a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java index 830fa99f90bb9..4b94b6589c6c8 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java @@ -135,7 +135,8 @@ public void testSkipSearchShards() throws InterruptedException { null, new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { @Override @@ -253,7 +254,8 @@ public void 
testLimitConcurrentShardRequests() throws InterruptedException { null, new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { @Override @@ -370,7 +372,8 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI null, new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { TestSearchResponse response = new TestSearchResponse(); @@ -492,7 +495,8 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI null, new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { TestSearchResponse response = new TestSearchResponse(); @@ -605,9 +609,9 @@ public void testAllowPartialResults() throws InterruptedException { null, new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { - @Override protected void executePhaseOnShard( SearchShardIterator shardIt, diff --git a/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java b/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java new file mode 100644 index 0000000000000..a2e301143d694 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java @@ -0,0 +1,228 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.BoostingQueryBuilder; +import org.opensearch.index.query.MatchNoneQueryBuilder; +import org.opensearch.index.query.MatchQueryBuilder; +import org.opensearch.index.query.MultiMatchQueryBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryStringQueryBuilder; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.index.query.RegexpQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.WildcardQueryBuilder; +import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; +import org.opensearch.search.aggregations.bucket.range.RangeAggregationBuilder; +import org.opensearch.search.aggregations.bucket.terms.MultiTermsAggregationBuilder; +import org.opensearch.search.aggregations.support.MultiTermsValuesSourceConfig; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.sort.ScoreSortBuilder; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.tags.Tags; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.Arrays; + +import org.mockito.Mockito; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.when; + +public final class SearchQueryCategorizerTests extends OpenSearchTestCase { + + private MetricsRegistry metricsRegistry; + + private SearchQueryCategorizer searchQueryCategorizer; + + @Before + public void setup() { + metricsRegistry = mock(MetricsRegistry.class); + when(metricsRegistry.createCounter(any(String.class), any(String.class), any(String.class))).thenAnswer( + invocation -> mock(Counter.class) + ); + searchQueryCategorizer = new SearchQueryCategorizer(metricsRegistry); + } + + public void testAggregationsQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.aggregation( + new MultiTermsAggregationBuilder("agg1").terms( + Arrays.asList( + new MultiTermsValuesSourceConfig.Builder().setFieldName("username").build(), + new MultiTermsValuesSourceConfig.Builder().setFieldName("rating").build() + ) + ) + ); + sourceBuilder.size(0); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.aggCounter).add(eq(1.0d)); + } + + public void testBoolQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(new BoolQueryBuilder().must(new MatchQueryBuilder("searchText", "fox"))); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.boolCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testFunctionScoreQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro"))); + + searchQueryCategorizer.categorize(sourceBuilder); + + 
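+        // As the neighboring testBoolQuery and testOtherQuery cases suggest, the categorizer
+        // also visits queries nested inside compound builders; here only the outer
+        // function_score counter is asserted, since the wrapped prefix query has no
+        // dedicated counter verified in this class.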
Mockito.verify(searchQueryCategorizer.searchQueryCounters.functionScoreCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testMatchQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "php")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testMatchPhraseQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(QueryBuilders.matchPhraseQuery("tags", "php")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchPhrasePrefixCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testMultiMatchQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(new MultiMatchQueryBuilder("foo bar", "myField")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.multiMatchCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testOtherQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + BoostingQueryBuilder queryBuilder = new BoostingQueryBuilder( + new TermQueryBuilder("unmapped_field", "foo"), + new MatchNoneQueryBuilder() + ); + sourceBuilder.query(queryBuilder); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.otherQueryCounter, times(2)).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testQueryStringQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + QueryStringQueryBuilder queryBuilder = new QueryStringQueryBuilder("foo:*"); + sourceBuilder.query(queryBuilder); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.queryStringQueryCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testRangeQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + RangeQueryBuilder rangeQuery = new RangeQueryBuilder("date"); + rangeQuery.gte("1970-01-01"); + rangeQuery.lt("1982-01-01"); + sourceBuilder.query(rangeQuery); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.rangeCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testRegexQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.query(new RegexpQueryBuilder("field", "text")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.regexCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testSortQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.query(QueryBuilders.matchQuery("tags", "ruby")); + sourceBuilder.sort("creationDate", SortOrder.DESC); + sourceBuilder.sort(new ScoreSortBuilder()); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.sortCounter, times(2)).add(eq(1.0d), any(Tags.class)); + } + 
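+    // The remaining cases follow the same shape as the ones above: build a
+    // SearchSourceBuilder, pass it through categorize(...), and verify that the matching
+    // counter recorded exactly 1.0 (with a Tags argument everywhere except aggCounter).
+    // A minimal sketch of that shared pattern, using only names from this file:
+    //
+    //     SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+    //     sourceBuilder.query(QueryBuilders.termQuery("field", "value"));
+    //     searchQueryCategorizer.categorize(sourceBuilder);
+    //     Mockito.verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class));
+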
+ public void testTermQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(QueryBuilders.termQuery("field", "value2")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testWildcardQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + sourceBuilder.query(new WildcardQueryBuilder("field", "text")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.wildcardCounter).add(eq(1.0d), any(Tags.class)); + } + + public void testComplexQuery() { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.size(50); + + TermQueryBuilder termQueryBuilder = QueryBuilders.termQuery("field", "value2"); + MatchQueryBuilder matchQueryBuilder = QueryBuilders.matchQuery("tags", "php"); + RegexpQueryBuilder regexpQueryBuilder = new RegexpQueryBuilder("field", "text"); + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder().must(termQueryBuilder) + .filter(matchQueryBuilder) + .should(regexpQueryBuilder); + sourceBuilder.query(boolQueryBuilder); + sourceBuilder.aggregation(new RangeAggregationBuilder("agg1").field("num")); + + searchQueryCategorizer.categorize(sourceBuilder); + + Mockito.verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.regexCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.boolCounter).add(eq(1.0d), any(Tags.class)); + Mockito.verify(searchQueryCategorizer.searchQueryCounters.aggCounter).add(eq(1.0d)); + } +} diff --git a/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java index 0e2780c195cb8..6a22a7ea2b5e4 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java @@ -214,7 +214,8 @@ public void sendExecuteQuery( timeProvider, null, task, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ) { @Override protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { @@ -226,6 +227,7 @@ public void run() { }; } }; + action.start(); latch.await(); assertThat(successfulOps.get(), equalTo(numShards)); diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java new file mode 100644 index 0000000000000..ef880043e863c --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerTests.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SearchRequestOperationsListenerTests extends OpenSearchTestCase { + + public void testListenersAreExecuted() { + Map<SearchPhaseName, SearchRequestStats.StatsHolder> searchPhaseMap = new HashMap<>(); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + searchPhaseMap.put(searchPhaseName, new SearchRequestStats.StatsHolder()); + } + SearchRequestOperationsListener testListener = new SearchRequestOperationsListener() { + + @Override + public void onPhaseStart(SearchPhaseContext context) { + searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.inc(); + } + + @Override + public void onPhaseEnd(SearchPhaseContext context) { + searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); + searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).total.inc(); + } + + @Override + public void onPhaseFailure(SearchPhaseContext context) { + searchPhaseMap.get(context.getCurrentPhase().getSearchPhaseName()).current.dec(); + } + }; + + int totalListeners = randomIntBetween(1, 10); + final List<SearchRequestOperationsListener> requestOperationListeners = new ArrayList<>(); + for (int i = 0; i < totalListeners; i++) { + requestOperationListeners.add(testListener); + } + + SearchRequestOperationsListener compositeListener = new SearchRequestOperationsListener.CompositeListener( + requestOperationListeners, + logger + ); + + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase searchPhase = mock(SearchPhase.class); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(ctx.getCurrentPhase()).thenReturn(searchPhase); + when(searchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + compositeListener.onPhaseStart(ctx); + assertEquals(totalListeners, searchPhaseMap.get(searchPhaseName).current.count()); + } + } +} diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java new file mode 100644 index 0000000000000..f24147a8194b4 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java @@ -0,0 +1,150 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Phaser; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SearchRequestStatsTests extends OpenSearchTestCase { + public void testSearchRequestPhaseFailure() { + SearchRequestStats testRequestStats = new SearchRequestStats(); + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + testRequestStats.onPhaseStart(ctx); + assertEquals(1, testRequestStats.getPhaseCurrent(searchPhaseName)); + testRequestStats.onPhaseFailure(ctx); + assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); + } + } + + public void testSearchRequestStats() { + SearchRequestStats testRequestStats = new SearchRequestStats(); + + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + long tookTimeInMillis = randomIntBetween(1, 10); + testRequestStats.onPhaseStart(ctx); + long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis); + when(mockSearchPhase.getStartTimeInNanos()).thenReturn(startTime); + assertEquals(1, testRequestStats.getPhaseCurrent(searchPhaseName)); + testRequestStats.onPhaseEnd(ctx); + assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); + assertEquals(1, testRequestStats.getPhaseTotal(searchPhaseName)); + assertThat(testRequestStats.getPhaseMetric(searchPhaseName), greaterThanOrEqualTo(tookTimeInMillis)); + } + } + + public void testSearchRequestStatsOnPhaseStartConcurrently() throws InterruptedException { + SearchRequestStats testRequestStats = new SearchRequestStats(); + int numTasks = randomIntBetween(5, 50); + Thread[] threads = new Thread[numTasks * SearchPhaseName.values().length]; + Phaser phaser = new Phaser(numTasks * SearchPhaseName.values().length + 1); + CountDownLatch countDownLatch = new CountDownLatch(numTasks * SearchPhaseName.values().length); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + for (int i = 0; i < numTasks; i++) { + threads[i] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + testRequestStats.onPhaseStart(ctx); + countDownLatch.countDown(); + }); + threads[i].start(); + } + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + assertEquals(numTasks, testRequestStats.getPhaseCurrent(searchPhaseName)); + } + } + + public void testSearchRequestStatsOnPhaseEndConcurrently() throws InterruptedException { + SearchRequestStats testRequestStats = new SearchRequestStats(); + int numTasks = 
randomIntBetween(5, 50); + Thread[] threads = new Thread[numTasks * SearchPhaseName.values().length]; + Phaser phaser = new Phaser(numTasks * SearchPhaseName.values().length + 1); + CountDownLatch countDownLatch = new CountDownLatch(numTasks * SearchPhaseName.values().length); + Map<SearchPhaseName, Long> searchPhaseNameLongMap = new HashMap<>(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + long tookTimeInMillis = randomIntBetween(1, 10); + long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis); + when(mockSearchPhase.getStartTimeInNanos()).thenReturn(startTime); + for (int i = 0; i < numTasks; i++) { + threads[i] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + testRequestStats.onPhaseEnd(ctx); + countDownLatch.countDown(); + }); + threads[i].start(); + } + searchPhaseNameLongMap.put(searchPhaseName, tookTimeInMillis); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + assertEquals(numTasks, testRequestStats.getPhaseTotal(searchPhaseName)); + assertThat( + testRequestStats.getPhaseMetric(searchPhaseName), + greaterThanOrEqualTo((searchPhaseNameLongMap.get(searchPhaseName) * numTasks)) + ); + } + } + + public void testSearchRequestStatsOnPhaseFailureConcurrently() throws InterruptedException { + SearchRequestStats testRequestStats = new SearchRequestStats(); + int numTasks = randomIntBetween(5, 50); + Thread[] threads = new Thread[numTasks * SearchPhaseName.values().length]; + Phaser phaser = new Phaser(numTasks * SearchPhaseName.values().length + 1); + CountDownLatch countDownLatch = new CountDownLatch(numTasks * SearchPhaseName.values().length); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + for (int i = 0; i < numTasks; i++) { + threads[i] = new Thread(() -> { + phaser.arriveAndAwaitAdvance(); + testRequestStats.onPhaseStart(ctx); + testRequestStats.onPhaseFailure(ctx); + countDownLatch.countDown(); + }); + threads[i].start(); + } + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); + } + } +} diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java index 7e6dfcc9ff645..90e154e6e66fc 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java @@ -254,6 +254,7 @@ private SearchRequest mutate(SearchRequest searchRequest) { ); mutators.add(() -> mutation.source(randomValueOtherThan(searchRequest.source(), this::createSearchSourceBuilder))); mutators.add(() -> mutation.setCcsMinimizeRoundtrips(searchRequest.isCcsMinimizeRoundtrips() == false)); + mutators.add(() -> mutation.setPhaseTook(searchRequest.isPhaseTook() == false)); mutators.add( () -> mutation.setCancelAfterTimeInterval( 
searchRequest.getCancelAfterTimeInterval() != null diff --git a/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java b/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java index c35bdf9c14587..c9e59ab4ea04d 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java @@ -37,9 +37,13 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -47,7 +51,10 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.plugins.SearchPlugin; import org.opensearch.rest.action.search.RestSearchAction; +import org.opensearch.search.GenericSearchExtBuilder; +import org.opensearch.search.SearchExtBuilder; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.search.SearchHitsTests; @@ -67,9 +74,11 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.UUID; -import static java.util.Collections.emptyList; import static java.util.Collections.singletonMap; import static org.opensearch.test.XContentTestUtils.insertRandomFields; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -80,11 +89,25 @@ public class SearchResponseTests extends OpenSearchTestCase { static { List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>(InternalAggregationTestCase.getDefaultNamedXContents()); namedXContents.addAll(SuggestTests.getDefaultNamedXContents()); + namedXContents.add( + new NamedXContentRegistry.Entry(SearchExtBuilder.class, DummySearchExtBuilder.DUMMY_FIELD, DummySearchExtBuilder::parse) + ); xContentRegistry = new NamedXContentRegistry(namedXContents); } private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry( - new SearchModule(Settings.EMPTY, emptyList()).getNamedWriteables() + new SearchModule(Settings.EMPTY, List.of(new SearchPlugin() { + @Override + public List<SearchExtSpec<?>> getSearchExts() { + return List.of( + new SearchExtSpec<>( + DummySearchExtBuilder.DUMMY_FIELD, + DummySearchExtBuilder::new, + parser -> DummySearchExtBuilder.parse(parser) + ) + ); + } + })).getNamedWriteables() ); private AggregationsTests aggregationsTests = new AggregationsTests(); @@ -119,10 +142,23 @@ private SearchResponse createMinimalTestItem() { * if minimal is set, don't include search hits, aggregations, suggest etc... to make test simpler */ private SearchResponse createTestItem(boolean minimal, ShardSearchFailure... 
shardSearchFailures) { + return createTestItem(minimal, Collections.emptyList(), shardSearchFailures); + } + + public SearchResponse createTestItem( + boolean minimal, + List<SearchExtBuilder> searchExtBuilders, + ShardSearchFailure... shardSearchFailures + ) { boolean timedOut = randomBoolean(); Boolean terminatedEarly = randomBoolean() ? null : randomBoolean(); int numReducePhases = randomIntBetween(1, 10); long tookInMillis = randomNonNegativeLong(); + Map<String, Long> phaseTookMap = new HashMap<>(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + phaseTookMap.put(searchPhaseName.getName(), randomNonNegativeLong()); + } + SearchResponse.PhaseTook phaseTook = new SearchResponse.PhaseTook(phaseTookMap); int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, totalShards); @@ -139,7 +175,8 @@ private SearchResponse createTestItem(boolean minimal, ShardSearchFailure... sha profileShardResults, timedOut, terminatedEarly, - numReducePhases + numReducePhases, + searchExtBuilders ); } else { internalSearchResponse = InternalSearchResponse.empty(); } @@ -152,8 +189,10 @@ private SearchResponse createTestItem(boolean minimal, ShardSearchFailure... sha successfulShards, skippedShards, tookInMillis, + phaseTook, shardSearchFailures, - randomBoolean() ? randomClusters() : SearchResponse.Clusters.EMPTY + randomBoolean() ? randomClusters() : SearchResponse.Clusters.EMPTY, + null ); } @@ -172,6 +211,32 @@ public void testFromXContent() throws IOException { doFromXContentTestWithRandomFields(createTestItem(), false); } + public void testFromXContentWithSearchExtBuilders() throws IOException { + doFromXContentTestWithRandomFields(createTestItem(false, List.of(new DummySearchExtBuilder(UUID.randomUUID().toString()))), false); + } + + public void testFromXContentWithUnregisteredSearchExtBuilders() throws IOException { + List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>(InternalAggregationTestCase.getDefaultNamedXContents()); + namedXContents.addAll(SuggestTests.getDefaultNamedXContents()); + String dummyId = UUID.randomUUID().toString(); + String fakeId = UUID.randomUUID().toString(); + List<SearchExtBuilder> extBuilders = List.of(new DummySearchExtBuilder(dummyId), new FakeSearchExtBuilder(fakeId)); + SearchResponse response = createTestItem(false, extBuilders); + MediaType xcontentType = randomFrom(XContentType.values()); + boolean humanReadable = randomBoolean(); + final ToXContent.Params params = new ToXContent.MapParams(singletonMap(RestSearchAction.TYPED_KEYS_PARAM, "true")); + BytesReference originalBytes = toShuffledXContent(response, xcontentType, params, humanReadable); + XContentParser parser = createParser(new NamedXContentRegistry(namedXContents), xcontentType.xContent(), originalBytes); + SearchResponse parsed = SearchResponse.fromXContent(parser); + assertEquals(extBuilders.size(), response.getInternalResponse().getSearchExtBuilders().size()); + + List<SearchExtBuilder> actual = parsed.getInternalResponse().getSearchExtBuilders(); + assertEquals(extBuilders.size(), actual.size()); + for (int i = 0; i < actual.size(); i++) { + assertTrue(actual.get(i) instanceof GenericSearchExtBuilder); + } + } + /** * This test adds random fields and objects to the xContent rendered out to * ensure we can parse it back to be forward compatible with additions to @@ -182,7 +247,7 @@ public void testFromXContentWithRandomFields() throws IOException { doFromXContentTestWithRandomFields(createMinimalTestItem(), true); } - private void 
doFromXContentTestWithRandomFields(SearchResponse response, boolean addRandomFields) throws IOException { + public void doFromXContentTestWithRandomFields(SearchResponse response, boolean addRandomFields) throws IOException { MediaType xcontentType = randomFrom(XContentType.values()); boolean humanReadable = randomBoolean(); final ToXContent.Params params = new ToXContent.MapParams(singletonMap(RestSearchAction.TYPED_KEYS_PARAM, "true")); @@ -245,6 +310,7 @@ public void testToXContent() { SearchHit hit = new SearchHit(1, "id1", Collections.emptyMap(), Collections.emptyMap()); hit.score(2.0f); SearchHit[] hits = new SearchHit[] { hit }; + String dummyId = UUID.randomUUID().toString(); { SearchResponse response = new SearchResponse( new InternalSearchResponse( @@ -254,7 +320,8 @@ public void testToXContent() { null, false, null, - 1 + 1, + List.of(new DummySearchExtBuilder(dummyId)) ), null, 0, @@ -262,7 +329,8 @@ public void testToXContent() { 0, 0, ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY + SearchResponse.Clusters.EMPTY, + null ); StringBuilder expectedString = new StringBuilder(); expectedString.append("{"); @@ -280,13 +348,27 @@ public void testToXContent() { { expectedString.append("{\"total\":{\"value\":100,\"relation\":\"eq\"},"); expectedString.append("\"max_score\":1.5,"); - expectedString.append("\"hits\":[{\"_id\":\"id1\",\"_score\":2.0}]}"); + expectedString.append("\"hits\":[{\"_id\":\"id1\",\"_score\":2.0}]},"); + } + expectedString.append("\"ext\":"); + { + expectedString.append("{\"dummy\":\"" + dummyId + "\"}"); + } } expectedString.append("}"); assertEquals(expectedString.toString(), Strings.toString(MediaTypeRegistry.JSON, response)); + List<SearchExtBuilder> searchExtBuilders = response.getInternalResponse().getSearchExtBuilders(); + assertEquals(1, searchExtBuilders.size()); } { + Map<String, Long> phaseTookMap = new HashMap<>(); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + phaseTookMap.put(searchPhaseName.getName(), 0L); + } + phaseTookMap.put(SearchPhaseName.QUERY.getName(), 50L); + phaseTookMap.put(SearchPhaseName.FETCH.getName(), 25L); + phaseTookMap.put(SearchPhaseName.EXPAND.getName(), 30L); + SearchResponse.PhaseTook phaseTook = new SearchResponse.PhaseTook(phaseTookMap); SearchResponse response = new SearchResponse( new InternalSearchResponse( new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), @@ -302,13 +384,24 @@ public void testToXContent() { 0, 0, 0, + phaseTook, ShardSearchFailure.EMPTY_ARRAY, - new SearchResponse.Clusters(5, 3, 2) + new SearchResponse.Clusters(5, 3, 2), + null ); StringBuilder expectedString = new StringBuilder(); expectedString.append("{"); { expectedString.append("\"took\":0,"); + expectedString.append("\"phase_took\":"); + { + expectedString.append("{\"dfs_pre_query\":0,"); + expectedString.append("\"query\":50,"); + expectedString.append("\"fetch\":25,"); + expectedString.append("\"dfs_query\":0,"); + expectedString.append("\"expand\":30,"); + expectedString.append("\"can_match\":0},"); + } expectedString.append("\"timed_out\":false,"); expectedString.append("\"_shards\":"); { @@ -352,6 +445,48 @@ public void testSerialization() throws IOException { assertEquals(searchResponse.getClusters(), deserialized.getClusters()); } + public void testSerializationWithSearchExtBuilders() throws IOException { + String id = UUID.randomUUID().toString(); + SearchResponse searchResponse = createTestItem(false, List.of(new DummySearchExtBuilder(id))); + SearchResponse deserialized = copyWriteable(searchResponse, 
namedWriteableRegistry, SearchResponse::new, Version.CURRENT); + if (searchResponse.getHits().getTotalHits() == null) { + assertNull(deserialized.getHits().getTotalHits()); + } else { + assertEquals(searchResponse.getHits().getTotalHits().value, deserialized.getHits().getTotalHits().value); + assertEquals(searchResponse.getHits().getTotalHits().relation, deserialized.getHits().getTotalHits().relation); + } + assertEquals(searchResponse.getHits().getHits().length, deserialized.getHits().getHits().length); + assertEquals(searchResponse.getNumReducePhases(), deserialized.getNumReducePhases()); + assertEquals(searchResponse.getFailedShards(), deserialized.getFailedShards()); + assertEquals(searchResponse.getTotalShards(), deserialized.getTotalShards()); + assertEquals(searchResponse.getSkippedShards(), deserialized.getSkippedShards()); + assertEquals(searchResponse.getClusters(), deserialized.getClusters()); + assertEquals( + searchResponse.getInternalResponse().getSearchExtBuilders().get(0), + deserialized.getInternalResponse().getSearchExtBuilders().get(0) + ); + } + + public void testSerializationWithSearchExtBuildersOnUnsupportedWriterVersion() throws IOException { + String id = UUID.randomUUID().toString(); + SearchResponse searchResponse = createTestItem(false, List.of(new DummySearchExtBuilder(id))); + SearchResponse deserialized = copyWriteable(searchResponse, namedWriteableRegistry, SearchResponse::new, Version.V_2_9_0); + if (searchResponse.getHits().getTotalHits() == null) { + assertNull(deserialized.getHits().getTotalHits()); + } else { + assertEquals(searchResponse.getHits().getTotalHits().value, deserialized.getHits().getTotalHits().value); + assertEquals(searchResponse.getHits().getTotalHits().relation, deserialized.getHits().getTotalHits().relation); + } + assertEquals(searchResponse.getHits().getHits().length, deserialized.getHits().getHits().length); + assertEquals(searchResponse.getNumReducePhases(), deserialized.getNumReducePhases()); + assertEquals(searchResponse.getFailedShards(), deserialized.getFailedShards()); + assertEquals(searchResponse.getTotalShards(), deserialized.getTotalShards()); + assertEquals(searchResponse.getSkippedShards(), deserialized.getSkippedShards()); + assertEquals(searchResponse.getClusters(), deserialized.getClusters()); + assertEquals(1, searchResponse.getInternalResponse().getSearchExtBuilders().size()); + assertTrue(deserialized.getInternalResponse().getSearchExtBuilders().isEmpty()); + } + public void testToXContentEmptyClusters() throws IOException { SearchResponse searchResponse = new SearchResponse( InternalSearchResponse.empty(), @@ -368,4 +503,107 @@ public void testToXContentEmptyClusters() throws IOException { deserialized.getClusters().toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals(0, builder.toString().length()); } + + public void testSearchResponsePhaseTookEquals() throws IOException { + SearchResponse.PhaseTook phaseTookA = new SearchResponse.PhaseTook(Map.of("foo", 0L, "bar", 1L)); + SearchResponse.PhaseTook phaseTookB = new SearchResponse.PhaseTook(Map.of("foo", 1L, "bar", 1L)); + SearchResponse.PhaseTook phaseTookC = new SearchResponse.PhaseTook(Map.of("foo", 0L)); + SearchResponse.PhaseTook phaseTookD = new SearchResponse.PhaseTook(Map.of()); + + assertNotEquals(phaseTookA, phaseTookB); + assertNotEquals(phaseTookB, phaseTookA); + assertNotEquals(phaseTookA, phaseTookC); + assertNotEquals(phaseTookC, phaseTookA); + assertNotEquals(phaseTookA, phaseTookD); + assertNotEquals(phaseTookD, phaseTookA); + 
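+        // PhaseTook equality is value-based and symmetric: a differing value, a missing key,
+        // or a different map size breaks equals() in both directions, while each instance
+        // still equals itself, as the assertions below confirm.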
assertEquals(phaseTookA, phaseTookA); + assertEquals(phaseTookB, phaseTookB); + assertEquals(phaseTookC, phaseTookC); + assertEquals(phaseTookD, phaseTookD); + } + + static class DummySearchExtBuilder extends SearchExtBuilder { + + static ParseField DUMMY_FIELD = new ParseField("dummy"); + + protected final String id; + + public DummySearchExtBuilder(String id) { + assertNotNull(id); + this.id = id; + } + + public DummySearchExtBuilder(StreamInput in) throws IOException { + this.id = in.readString(); + } + + public String getId() { + return this.id; + } + + @Override + public String getWriteableName() { + return DUMMY_FIELD.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(this.id); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field("dummy", id); + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + + if (!(obj instanceof DummySearchExtBuilder)) { + return false; + } + + return this.id.equals(((DummySearchExtBuilder) obj).getId()); + } + + public static DummySearchExtBuilder parse(XContentParser parser) throws IOException { + String id; + XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.VALUE_STRING) { + id = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), "Expected a VALUE_STRING but got " + token); + } + if (id == null) { + throw new ParsingException(parser.getTokenLocation(), "no id specified for " + DUMMY_FIELD.getPreferredName()); + } + return new DummySearchExtBuilder(id); + } + } + + static class FakeSearchExtBuilder extends DummySearchExtBuilder { + static ParseField DUMMY_FIELD = new ParseField("fake"); + + public FakeSearchExtBuilder(String id) { + super(id); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(DUMMY_FIELD.getPreferredName()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(DUMMY_FIELD.getPreferredName(), id); + } + } } diff --git a/server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java b/server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java new file mode 100644 index 0000000000000..f0f1a43e6c21e --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java @@ -0,0 +1,54 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SearchTimeProviderTests extends OpenSearchTestCase { + + public void testSearchTimeProviderPhaseFailure() { + TransportSearchAction.SearchTimeProvider testTimeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + testTimeProvider.onPhaseStart(ctx); + assertNull(testTimeProvider.getPhaseTookTime(searchPhaseName)); + testTimeProvider.onPhaseFailure(ctx); + assertNull(testTimeProvider.getPhaseTookTime(searchPhaseName)); + } + } + + public void testSearchTimeProviderPhaseEnd() { + TransportSearchAction.SearchTimeProvider testTimeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); + + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + long tookTimeInMillis = randomIntBetween(1, 100); + testTimeProvider.onPhaseStart(ctx); + long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis); + when(mockSearchPhase.getStartTimeInNanos()).thenReturn(startTime); + assertNull(testTimeProvider.getPhaseTookTime(searchPhaseName)); + testTimeProvider.onPhaseEnd(ctx); + assertThat(testTimeProvider.getPhaseTookTime(searchPhaseName), greaterThanOrEqualTo(tookTimeInMillis)); + } + } +} diff --git a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java index 6713faf78a58c..8d3cdc070c695 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java @@ -27,6 +27,7 @@ import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.ThreadPool; @@ -141,7 +142,7 @@ public void testDeletePitSuccess() throws InterruptedException, ExecutionExcepti Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -204,7 +205,7 @@ public void testDeleteAllPITSuccess() throws InterruptedException, ExecutionExce Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -279,7 +280,7 @@ public void testDeletePitWhenNodeIsDown() throws InterruptedException, Execution Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -342,7 +343,7 @@ public void testDeletePitWhenAllNodesAreDown() { Settings.EMPTY, Version.CURRENT, threadPool, - null + 
NoopTracer.INSTANCE ) ) { transportService.start(); @@ -400,7 +401,7 @@ public void testDeletePitFailure() { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -465,7 +466,7 @@ public void testDeleteAllPitWhenNodeIsDown() { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -542,7 +543,7 @@ public void testDeleteAllPitWhenAllNodesAreDown() { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -615,7 +616,7 @@ public void testDeleteAllPitFailure() { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); diff --git a/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java index 6bb7401615b8f..48970e2b96add 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java @@ -51,6 +51,7 @@ import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; @@ -87,7 +88,8 @@ public void testParentTaskId() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ) { @Override public TaskManager getTaskManager() { @@ -151,7 +153,8 @@ public void testBatchExecute() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ) { @Override public TaskManager getTaskManager() { diff --git a/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java index e278f088508fc..c4bf8a5d87172 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java @@ -75,6 +75,7 @@ import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.sort.SortBuilders; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -234,7 +235,14 @@ public void testMergeShardsIterators() { } public void testProcessRemoteShards() { - try (TransportService transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + TransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { RemoteClusterService service = transportService.getRemoteClusterService(); assertFalse(service.isCrossClusterSearchEnabled()); Map searchShardsResponseMap = new 
HashMap<>(); @@ -451,7 +459,9 @@ public void testCCSRemoteReduceMergeFails() throws Exception { OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); Function reduceContext = finalReduce -> null; - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); RemoteClusterService remoteClusterService = service.getRemoteClusterService(); @@ -507,7 +517,9 @@ public void testCCSRemoteReduce() throws Exception { OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; int totalClusters = numClusters + (local ? 1 : 0); TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); RemoteClusterService remoteClusterService = service.getRemoteClusterService(); @@ -748,7 +760,9 @@ public void testCollectSearchShards() throws Exception { Settings.Builder builder = Settings.builder(); MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); Settings settings = builder.build(); - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); RemoteClusterService remoteClusterService = service.getRemoteClusterService(); diff --git a/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 22a07f15fc5ed..4305151965ab6 100644 --- a/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -68,6 +68,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -237,7 +238,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java 
b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java
index 56a31f4d3cbd7..9ae1310a8b15c 100644
--- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java
+++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java
@@ -63,6 +63,7 @@
 import org.opensearch.discovery.ClusterManagerNotDiscoveredException;
 import org.opensearch.node.NodeClosedException;
 import org.opensearch.tasks.Task;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.CapturingTransport;
 import org.opensearch.threadpool.TestThreadPool;
@@ -117,7 +118,8 @@ public void setUp() throws Exception {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
diff --git a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java
index 2a473002cb953..10f9708072889 100644
--- a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java
+++ b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java
@@ -46,6 +46,7 @@
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.CapturingTransport;
 import org.opensearch.threadpool.TestThreadPool;
@@ -208,7 +209,8 @@ public void setUp() throws Exception {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
diff --git a/server/src/test/java/org/opensearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/opensearch/action/support/replication/BroadcastReplicationTests.java
index 9022b32630d5a..19a9918fa4561 100644
--- a/server/src/test/java/org/opensearch/action/support/replication/BroadcastReplicationTests.java
+++ b/server/src/test/java/org/opensearch/action/support/replication/BroadcastReplicationTests.java
@@ -62,6 +62,7 @@
 import org.opensearch.core.indices.breaker.NoneCircuitBreakerService;
 import org.opensearch.core.rest.RestStatus;
 import org.opensearch.tasks.Task;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
@@ -115,7 +116,8 @@ public void setUp() throws Exception {
             new NetworkService(Collections.emptyList()),
             PageCacheRecycler.NON_RECYCLING_INSTANCE,
             new NamedWriteableRegistry(Collections.emptyList()),
-            circuitBreakerService
+            circuitBreakerService,
+            NoopTracer.INSTANCE
         );
         clusterService = createClusterService(threadPool);
         transportService = new TransportService(
@@ -125,7 +127,8 @@
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java
index 6b717fe187078..dad0fa0efd3ec 100644
--- a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java
+++ b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java
@@ -87,6 +87,7 @@
 import org.opensearch.indices.IndexClosedException;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.cluster.ClusterStateChanges;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.CapturingTransport;
 import org.opensearch.test.transport.MockTransportService;
@@ -152,7 +153,7 @@ public class TransportReplicationActionTests extends OpenSearchTestCase {
     /**
      * takes a request that was sent by a {@link TransportReplicationAction} and captured
      * and returns the underlying request if it's wrapped or the original (cast to the expected type).
-     *
+     * <p>
      * This will throw a {@link ClassCastException} if the request is of the wrong type.
      */
     public static <R extends ReplicationRequest> R resolveRequest(TransportRequest requestOrWrappedRequest) {
@@ -195,7 +196,8 @@ public void setUp() throws Exception {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
@@ -1316,7 +1318,8 @@ public void testRetryOnReplicaWithRealTransport() throws Exception {
             new NetworkService(Collections.emptyList()),
             PageCacheRecycler.NON_RECYCLING_INSTANCE,
             namedWriteableRegistry,
-            new NoneCircuitBreakerService()
+            new NoneCircuitBreakerService(),
+            NoopTracer.INSTANCE
         );
         transportService = new MockTransportService(
             Settings.EMPTY,
@@ -1325,7 +1328,8 @@
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java
index 088c2b0eb14f4..cce8758ef1014 100644
--- a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java
+++ b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationAllPermitsAcquisitionTests.java
@@ -65,6 +65,7 @@
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.shard.IndexShardTestCase;
 import org.opensearch.indices.IndicesService;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.transport.MockTransport;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.TransportException;
@@ -113,7 +114,7 @@
 * This test tests the concurrent execution of several transport replication actions. All of these actions (except one) acquire a single
 * permit during their execution on shards and are expected to fail if a global level or index level block is present in the cluster state.
 * These actions are all started at the same time, but some are delayed until one last action.
-*
+* <p>
 * This last action is special because it acquires all the permits on shards, adds the block to the cluster state and then "releases" the
 * previously delayed single permit actions. This way, there is a clear transition between the single permit actions executed before the
 * all permit action that sets the block and those executed afterwards that are doomed to fail because of the block.
@@ -232,7 +233,8 @@ public String executor() {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             bta -> node1,
             null,
-            emptySet()
+            emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java
index 06a13976756a9..7212b1f5efe13 100644
--- a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java
+++ b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java
@@ -37,6 +37,7 @@
 import org.opensearch.index.translog.Translog;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.SystemIndices;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.CapturingTransport;
 import org.opensearch.threadpool.TestThreadPool;
@@ -102,7 +103,8 @@ public void setUp() throws Exception {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
@@ -390,7 +392,8 @@ protected TestAction(
             ignore -> ThreadPool.Names.SAME,
             false,
             TransportWriteActionForIndexingPressureTests.this.indexingPressureService,
-            new SystemIndices(emptyMap())
+            new SystemIndices(emptyMap()),
+            NoopTracer.INSTANCE
         );
     }
diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java
index c6421bfa77e70..b4549f82230bf 100644
--- a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java
+++ b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java
@@ -65,6 +65,7 @@
 import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.SystemIndices;
 import org.opensearch.node.NodeClosedException;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.ClusterServiceUtils;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.CapturingTransport;
@@ -288,7 +289,8 @@ public void testReplicaProxy() throws InterruptedException, ExecutionException {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
@@ -407,7 +409,8 @@ public void testPrimaryClosedDoesNotFailShard() {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
@@ -461,7 +464,8 @@ protected TestAction(boolean withDocumentFailureOnPrimary, boolean withDocumentF
                 TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                 x -> null,
                 null,
-                Collections.emptySet()
+                Collections.emptySet(),
+                NoopTracer.INSTANCE
             ),
             TransportWriteActionTests.this.clusterService,
             null,
@@ -473,7 +477,8 @@ protected TestAction(boolean withDocumentFailureOnPrimary, boolean withDocumentF
             ignore -> ThreadPool.Names.SAME,
             false,
             new IndexingPressureService(Settings.EMPTY, TransportWriteActionTests.this.clusterService),
-            new SystemIndices(emptyMap())
+            new SystemIndices(emptyMap()),
+            NoopTracer.INSTANCE
         );
         this.withDocumentFailureOnPrimary = withDocumentFailureOnPrimary;
         this.withDocumentFailureOnReplica = withDocumentFailureOnReplica;
@@ -501,7 +506,8 @@ protected TestAction(
             ignore -> ThreadPool.Names.SAME,
             false,
             new IndexingPressureService(settings, clusterService),
-            new SystemIndices(emptyMap())
+            new SystemIndices(emptyMap()),
+            NoopTracer.INSTANCE
         );
         this.withDocumentFailureOnPrimary = false;
         this.withDocumentFailureOnReplica = false;
diff --git a/server/src/test/java/org/opensearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java b/server/src/test/java/org/opensearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java
index 4a540f2273739..118b4e596fc66 100644
--- a/server/src/test/java/org/opensearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java
+++ b/server/src/test/java/org/opensearch/action/support/single/instance/TransportInstanceSingleOperationActionTests.java
@@ -57,6 +57,7 @@
 import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.core.rest.RestStatus;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.CapturingTransport;
 import org.opensearch.threadpool.TestThreadPool;
@@ -188,7 +189,8 @@ public void setUp() throws Exception {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
diff --git a/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java b/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java
index 3aa4721984a43..a38e90911e201 100644
--- a/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java
+++ b/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java
@@ -32,6 +32,8 @@
 
 package org.opensearch.action.termvectors;
 
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.LowerCaseFilter;
@@ -62,22 +64,43 @@
 import org.apache.lucene.store.Directory;
 import org.opensearch.action.admin.indices.alias.Alias;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedOpenSearchIntegTestCase;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Locale;
 import java.util.Map;
 
 import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.equalTo;
 
-public abstract class AbstractTermVectorsTestCase extends OpenSearchIntegTestCase {
+public abstract class AbstractTermVectorsTestCase extends ParameterizedOpenSearchIntegTestCase {
+
+    public AbstractTermVectorsTestCase(Settings dynamicSettings) {
+        super(dynamicSettings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() },
+            new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }
+        );
+    }
+
+    @Override
+    protected Settings featureFlagSettings() {
+        return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build();
+    }
+
     protected static class TestFieldSetting {
         public final String name;
         public final boolean storedOffset;
diff --git a/server/src/test/java/org/opensearch/action/termvectors/TransportMultiTermVectorsActionTests.java b/server/src/test/java/org/opensearch/action/termvectors/TransportMultiTermVectorsActionTests.java
index b01ac39dc515e..0868421fe1d41 100644
--- a/server/src/test/java/org/opensearch/action/termvectors/TransportMultiTermVectorsActionTests.java
+++ b/server/src/test/java/org/opensearch/action/termvectors/TransportMultiTermVectorsActionTests.java
@@ -61,6 +61,7 @@
 import org.opensearch.indices.IndicesService;
 import org.opensearch.tasks.Task;
 import org.opensearch.tasks.TaskManager;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
@@ -107,7 +108,8 @@ public static void beforeClass() throws Exception {
                 randomBase64UUID()
             ),
             null,
-            emptySet()
+            emptySet(),
+            NoopTracer.INSTANCE
         ) {
             @Override
             public TaskManager getTaskManager() {
diff --git a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java
index 63fe65d70d020..457bdac1809ef 100644
--- a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java
+++ b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java
@@ -73,6 +73,7 @@
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;
 import static java.util.Collections.singletonMap;
+import static org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING;
 import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
@@ -84,7 +85,7 @@ public void testSupersedes() {
         final DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), version);
         final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), version);
         final DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).build();
-        ClusterName name = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY);
+        ClusterName name = CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY);
         ClusterState noClusterManager1 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build();
         ClusterState noClusterManager2 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build();
         ClusterState withClusterManager1a = ClusterState.builder(name)
diff --git a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java
index 8ba965b3df1ab..f037b75dc16a3 100644
--- a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java
+++ b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java
@@ -190,6 +190,9 @@ public void testFillDiskUsage() {
             null,
             null,
             null,
+            null,
+            null,
+            null,
             null
         ),
         new NodeStats(
@@ -216,6 +219,9 @@
             null,
             null,
             null,
+            null,
+            null,
+            null,
             null
         ),
         new NodeStats(
@@ -242,6 +248,9 @@
             null,
             null,
             null,
+            null,
+            null,
+            null,
             null
         )
     );
@@ -299,6 +308,9 @@ public void testFillDiskUsageSomeInvalidValues() {
             null,
             null,
             null,
+            null,
+            null,
+            null,
             null
         ),
         new NodeStats(
@@ -325,6 +337,9 @@
             null,
             null,
             null,
+            null,
+            null,
+            null,
             null
         ),
         new NodeStats(
@@ -351,6 +366,9 @@
             null,
             null,
             null,
+            null,
+            null,
+            null,
             null
         )
     );
diff --git a/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java
index a5017867c2e74..4cf82f1dabab3 100644
--- a/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java
@@ -50,6 +50,7 @@
 import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.common.transport.BoundTransportAddress;
 import org.opensearch.core.common.transport.TransportAddress;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.MockLogAppender;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.junit.annotations.TestLogging;
@@ -552,7 +553,8 @@ private TestTransportService(Transport transport, ThreadPool threadPool) {
                 TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                 boundAddress -> DiscoveryNode.createLocal(Settings.EMPTY, buildNewFakeTransportAddress(), UUIDs.randomBase64UUID()),
                 null,
-                emptySet()
+                emptySet(),
+                NoopTracer.INSTANCE
             );
         }
diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java
index 9e8ef3a325d92..efe91de1ae1a8 100644
--- a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java
+++ b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java
@@ -57,6 +57,7 @@
 import org.opensearch.core.common.io.stream.Writeable;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.core.transport.TransportResponse;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.CapturingTransport;
 import org.opensearch.threadpool.TestThreadPool;
@@ -155,7 +156,8 @@ public void setUp() throws Exception {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
        );
        transportService.start();
        transportService.acceptIncomingRequests();
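The hunks above all apply one mechanical change: the test TransportService (and the mock transports behind it) now takes a trailing Tracer argument, satisfied in unit tests by the no-op tracer. A minimal sketch of the resulting construction, assuming the usual fixtures each setUp() builds earlier (a transport, a TestThreadPool, and a ClusterService); the variable names here are illustrative, not part of the patch:

    // Sketch only: "transport", "threadPool" and "clusterService" stand in for
    // the fixtures each test creates earlier in setUp().
    TransportService transportService = new TransportService(
        Settings.EMPTY,
        transport,
        threadPool,
        TransportService.NOOP_TRANSPORT_INTERCEPTOR,
        x -> clusterService.localNode(),  // local-node supplier
        null,                             // no ClusterSettings in these tests
        Collections.emptySet(),           // no task headers
        NoopTracer.INSTANCE               // the new trailing Tracer parameter
    );
    transportService.start();
    transportService.acceptIncomingRequests();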
diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java
index 0cde0262a8f38..775d113f986ca 100644
--- a/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java
+++ b/server/src/test/java/org/opensearch/cluster/action/shard/routing/weighted/get/TransportGetWeightedRoutingActionTests.java
@@ -30,6 +30,7 @@
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.ClusterServiceUtils;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.MockTransport;
@@ -90,8 +91,8 @@ public void setUpService() {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             boundTransportAddress -> clusterService.state().nodes().get("nodes1"),
             null,
-            Collections.emptySet()
-
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
 
         Settings.Builder settingsBuilder = Settings.builder()
diff --git a/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealthTests.java b/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealthTests.java
index 822d9c416d8d0..7910daebb00de 100644
--- a/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealthTests.java
+++ b/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessAttributesHealthTests.java
@@ -16,6 +16,7 @@
 import org.opensearch.cluster.node.DiscoveryNodes;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.CapturingTransport;
 import org.opensearch.threadpool.TestThreadPool;
@@ -64,7 +65,8 @@ public void setUp() throws Exception {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
diff --git a/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealthTests.java b/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealthTests.java
index 8afe343ccd56d..b68f0f2375354 100644
--- a/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealthTests.java
+++ b/server/src/test/java/org/opensearch/cluster/awarenesshealth/ClusterAwarenessHealthTests.java
@@ -29,6 +29,7 @@
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.concurrent.ThreadContext;
 import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.CapturingTransport;
 import org.opensearch.threadpool.TestThreadPool;
@@ -82,7 +83,8 @@ public void setUp() throws Exception {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> clusterService.localNode(),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java
index 03b35fe8c9f36..0b84eb19f4264 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceDeprecatedMasterTests.java
@@ -36,6 +36,7 @@
 import org.opensearch.cluster.node.DiscoveryNodeRole;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.discovery.DiscoveryModule;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.MockTransport;
 import org.opensearch.transport.TransportRequest;
@@ -100,7 +101,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             boundTransportAddress -> localNode,
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
     }
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java
index be7b32d4aef11..bfb225854979b 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java
@@ -37,6 +37,7 @@
 import org.opensearch.cluster.node.DiscoveryNodeRole;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.discovery.DiscoveryModule;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.MockTransport;
 import org.opensearch.transport.TransportRequest;
@@ -101,7 +102,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             boundTransportAddress -> localNode,
             null,
-            emptySet()
+            emptySet(),
+            NoopTracer.INSTANCE
         );
     }
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java
index 45e71138abf99..1c0dc7fc1ca2d 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java
@@ -36,6 +36,7 @@
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration;
 import org.opensearch.cluster.coordination.CoordinationState.PersistedState;
+import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType;
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.node.DiscoveryNodeRole;
@@ -44,19 +45,35 @@
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.Assertions;
 import org.opensearch.core.common.transport.TransportAddress;
+import org.opensearch.gateway.GatewayMetaState.RemotePersistedState;
+import org.opensearch.gateway.remote.ClusterMetadataManifest;
+import org.opensearch.gateway.remote.RemoteClusterStateService;
+import org.opensearch.repositories.fs.FsRepository;
 import org.opensearch.test.EqualsHashCodeTestUtils;
 import org.opensearch.test.OpenSearchTestCase;
 import org.junit.Before;
 
+import java.io.IOException;
 import java.util.Collections;
+import java.util.Locale;
 import java.util.Optional;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mockito;
+
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 
 public class CoordinationStateTests extends OpenSearchTestCase {
 
@@ -67,6 +84,7 @@ public class CoordinationStateTests extends OpenSearchTestCase {
     private ClusterState initialStateNode1;
 
     private PersistedState ps1;
+    private PersistedStateRegistry psr1;
 
     private CoordinationState cs1;
     private CoordinationState cs2;
@@ -97,10 +115,12 @@ public void setupNodes() {
         );
 
         ps1 = new InMemoryPersistedState(0L, initialStateNode1);
+        psr1 = persistedStateRegistry();
+        psr1.addPersistedState(PersistedStateType.LOCAL, ps1);
 
-        cs1 = createCoordinationState(ps1, node1);
-        cs2 = createCoordinationState(new InMemoryPersistedState(0L, initialStateNode2), node2);
-        cs3 = createCoordinationState(new InMemoryPersistedState(0L, initialStateNode3), node3);
+        cs1 = createCoordinationState(psr1, node1, Settings.EMPTY);
+        cs2 = createCoordinationState(createPersistedStateRegistry(initialStateNode2), node2, Settings.EMPTY);
+        cs3 = createCoordinationState(createPersistedStateRegistry(initialStateNode3), node3, Settings.EMPTY);
     }
 
     public static DiscoveryNode createNode(String id) {
@@ -200,7 +220,7 @@ public void testJoinBeforeBootstrap() {
     public void testJoinWithNoStartJoinAfterReboot() {
         StartJoinRequest startJoinRequest1 = new StartJoinRequest(node1, randomLongBetween(1, 5));
         Join v1 = cs1.handleStartJoin(startJoinRequest1);
-        cs1 = createCoordinationState(ps1, node1);
+        cs1 = createCoordinationState(psr1, node1, Settings.EMPTY);
         assertThat(
             expectThrows(CoordinationStateRejectedException.class, () -> cs1.handleJoin(v1)).getMessage(),
             containsString("ignored join as term has not been incremented yet after reboot")
@@ -886,8 +906,104 @@ public void testSafety() {
         ).runRandomly();
     }
 
-    public static CoordinationState createCoordinationState(PersistedState storage, DiscoveryNode localNode) {
-        return new CoordinationState(localNode, storage, ElectionStrategy.DEFAULT_INSTANCE);
+    public void testHandlePrePublishAndCommitWhenRemoteStateDisabled() {
+        final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry();
+        persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, ps1);
+        final PersistedStateRegistry persistedStateRegistrySpy = Mockito.spy(persistedStateRegistry);
+        final CoordinationState coordinationState = createCoordinationState(persistedStateRegistrySpy, node1, Settings.EMPTY);
+        final VotingConfiguration initialConfig = VotingConfiguration.of(node1);
+        final ClusterState clusterState = clusterState(0L, 0L, node1, initialConfig, initialConfig, 42L);
+        coordinationState.handlePrePublish(clusterState);
+        Mockito.verify(persistedStateRegistrySpy, Mockito.times(0)).getPersistedState(PersistedStateType.REMOTE);
+        assertThat(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE), nullValue());
+        final ClusterState clusterState2 = clusterState(0L, 1L, node1, initialConfig, initialConfig, 42L);
+        coordinationState.handlePrePublish(clusterState2);
+        Mockito.verify(persistedStateRegistrySpy, Mockito.times(0)).getPersistedState(PersistedStateType.REMOTE);
+        assertThat(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE), nullValue());
+        coordinationState.handlePreCommit();
+        Mockito.verify(persistedStateRegistrySpy, Mockito.times(0)).getPersistedState(PersistedStateType.REMOTE);
+        assertThat(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE), nullValue());
+    }
+
+    public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOException {
+        final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class);
+        final VotingConfiguration initialConfig = VotingConfiguration.of(node1);
+        final ClusterState clusterState = clusterState(0L, 0L, node1, initialConfig, initialConfig, 42L);
+        final String previousClusterUUID = "prev-cluster-uuid";
+        final ClusterMetadataManifest manifest = new ClusterMetadataManifest(
+            0L,
+            0L,
+            randomAlphaOfLength(10),
+            randomAlphaOfLength(10),
+            Version.CURRENT,
+            randomAlphaOfLength(10),
+            false,
+            1,
+            randomAlphaOfLength(10),
+            Collections.emptyList(),
+            randomAlphaOfLength(10),
+            true
+        );
+        Mockito.when(remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID)).thenReturn(manifest);
+
+        final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry();
+        persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, ps1);
+        persistedStateRegistry.addPersistedState(
+            PersistedStateType.REMOTE,
+            new RemotePersistedState(remoteClusterStateService, previousClusterUUID)
+        );
+
+        String randomRepoName = "randomRepoName";
+        String stateRepoTypeAttributeKey = String.format(
+            Locale.getDefault(),
+            "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT,
+            randomRepoName
+        );
+        String stateRepoSettingsAttributeKeyPrefix = String.format(
+            Locale.getDefault(),
+            "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX,
+            randomRepoName
+        );
+
+        Settings settings = Settings.builder()
+            .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, randomRepoName)
+            .put(stateRepoTypeAttributeKey, FsRepository.TYPE)
+            .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath")
+            .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true)
+            .build();
+
+        final CoordinationState coordinationState = createCoordinationState(persistedStateRegistry, node1, settings);
+        coordinationState.handlePrePublish(clusterState);
+        Mockito.verify(remoteClusterStateService, Mockito.times(1)).writeFullMetadata(clusterState, previousClusterUUID);
+        assertThat(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedState(), equalTo(clusterState));
+
+        coordinationState.handlePreCommit();
+        ClusterState committedClusterState = ClusterState.builder(clusterState)
+            .metadata(Metadata.builder(clusterState.metadata()).clusterUUIDCommitted(true).build())
+            .build();
+        // Mockito.verify(remoteClusterStateService, Mockito.times(1)).markLastStateAsCommitted(committedClusterState, manifest);
+        ArgumentCaptor<ClusterState> clusterStateCaptor = ArgumentCaptor.forClass(ClusterState.class);
+        verify(remoteClusterStateService, times(1)).markLastStateAsCommitted(clusterStateCaptor.capture(), any());
+        assertThat(clusterStateCaptor.getValue().metadata().indices(), equalTo(committedClusterState.metadata().indices()));
+        assertThat(clusterStateCaptor.getValue().metadata().clusterUUID(), equalTo(committedClusterState.metadata().clusterUUID()));
+        assertThat(clusterStateCaptor.getValue().stateUUID(), equalTo(committedClusterState.stateUUID()));
+        assertThat(
+            clusterStateCaptor.getValue().coordinationMetadata().term(),
+            equalTo(committedClusterState.coordinationMetadata().term())
+        );
+        assertThat(clusterStateCaptor.getValue().version(), equalTo(committedClusterState.version()));
+        assertThat(
+            clusterStateCaptor.getValue().metadata().clusterUUIDCommitted(),
+            equalTo(committedClusterState.metadata().clusterUUIDCommitted())
+        );
+    }
+
+    public static CoordinationState createCoordinationState(
+        PersistedStateRegistry persistedStateRegistry,
+        DiscoveryNode localNode,
+        Settings settings
+    ) {
+        return new CoordinationState(localNode, persistedStateRegistry, ElectionStrategy.DEFAULT_INSTANCE, settings);
     }
 
     public static ClusterState clusterState(
@@ -950,4 +1066,10 @@ public static ClusterState setValue(ClusterState clusterState, long value) {
     public static long value(ClusterState clusterState) {
         return clusterState.metadata().persistentSettings().getAsLong("value", 0L);
     }
+
+    private static PersistedStateRegistry createPersistedStateRegistry(ClusterState clusterState) {
+        final PersistedStateRegistry persistedStateRegistry = new PersistedStateRegistry();
+        persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, new InMemoryPersistedState(0L, clusterState));
+        return persistedStateRegistry;
+    }
 }
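The CoordinationStateTests hunks above replace the single PersistedState argument with a PersistedStateRegistry plus Settings. A condensed sketch of the new wiring, using only the helpers visible in the diff (persistedStateRegistry(), InMemoryPersistedState, createCoordinationState); variable names are illustrative:

    // The LOCAL slot is always registered; a REMOTE slot is optional.
    PersistedStateRegistry registry = persistedStateRegistry();
    registry.addPersistedState(PersistedStateType.LOCAL, new InMemoryPersistedState(0L, initialState));
    CoordinationState state = createCoordinationState(registry, localNode, Settings.EMPTY);
    // With no REMOTE entry registered, handlePrePublish(...) and handlePreCommit()
    // leave the remote cluster-state repository untouched, which is exactly what
    // testHandlePrePublishAndCommitWhenRemoteStateDisabled asserts above.
    state.handlePrePublish(clusterState);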
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java
index d08a80208c533..a3129655148ab 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java
@@ -1256,7 +1256,7 @@ public void testCannotJoinClusterWithDifferentUUID() throws IllegalAccessExcepti
         final ClusterNode newNode = cluster1.new ClusterNode(
             nextNodeIndex.getAndIncrement(),
             nodeInOtherCluster.getLocalNode(),
             n -> cluster1.new MockPersistedState(
-                n, nodeInOtherCluster.persistedState, Function.identity(), Function.identity()
+                n, nodeInOtherCluster.persistedStateRegistry, Function.identity(), Function.identity()
             ),
             nodeInOtherCluster.nodeSettings,
             () -> new StatusInfo(StatusInfo.Status.HEALTHY, "healthy-info")
         );
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java
index 30512fd96088e..c152a1606681e 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java
@@ -46,6 +46,7 @@
 import org.opensearch.core.transport.TransportResponse.Empty;
 import org.opensearch.monitor.NodeHealthService;
 import org.opensearch.monitor.StatusInfo;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.EqualsHashCodeTestUtils;
 import org.opensearch.test.EqualsHashCodeTestUtils.CopyFunction;
 import org.opensearch.test.OpenSearchTestCase;
@@ -123,7 +124,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             boundTransportAddress -> localNode,
             null,
-            emptySet()
+            emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
@@ -285,7 +287,8 @@ public String toString() {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             boundTransportAddress -> localNode,
             null,
-            emptySet()
+            emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
@@ -371,7 +374,8 @@ public String toString() {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             boundTransportAddress -> localNode,
             null,
-            emptySet()
+            emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
@@ -475,7 +479,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             boundTransportAddress -> follower,
             null,
-            emptySet()
+            emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
@@ -546,7 +551,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             boundTransportAddress -> follower,
             null,
-            emptySet()
+            emptySet(),
+            NoopTracer.INSTANCE
         );
         transportService.start();
         transportService.acceptIncomingRequests();
@@ -701,7 +707,8 @@ public void testPreferClusterManagerNodes() {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> nodes.get(0),
             null,
-            emptySet()
+            emptySet(),
+            NoopTracer.INSTANCE
         );
         final FollowersChecker followersChecker = new FollowersChecker(
             Settings.EMPTY,
diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java
index be0161b84d6fa..78c3b5d45a9ab 100644
--- a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java
+++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java
@@ -40,16 +40,22 @@
 import org.opensearch.cluster.NotClusterManagerException;
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.SetOnce;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.core.transport.TransportResponse;
 import org.opensearch.monitor.StatusInfo;
+import org.opensearch.node.remotestore.RemoteStoreNodeService;
+import org.opensearch.repositories.RepositoriesService;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.CapturingTransport;
 import org.opensearch.test.transport.CapturingTransport.CapturedRequest;
 import org.opensearch.test.transport.MockTransport;
+import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.BytesTransportRequest;
 import org.opensearch.transport.RemoteTransportException;
 import org.opensearch.transport.TransportException;
@@ -73,6 +79,7 @@
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.core.Is.is;
+import static org.mockito.Mockito.mock;
 
 public class JoinHelperTests extends OpenSearchTestCase {
     private final NamedWriteableRegistry namedWriteableRegistry = DEFAULT_NAMED_WRITABLE_REGISTRY;
@@ -90,13 +97,15 @@ public void testJoinDeduplication() {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> localNode,
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         JoinHelper joinHelper = new JoinHelper(
             Settings.EMPTY,
             null,
             null,
             transportService,
+            buildRemoteStoreNodeService(transportService, deterministicTaskQueue.getThreadPool()),
             () -> 0L,
             () -> null,
             (joinRequest, joinCallback) -> {
@@ -274,7 +283,8 @@ public void testJoinFailureOnUnhealthyNodes() {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> localNode,
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         AtomicReference<StatusInfo> nodeHealthServiceStatus = new AtomicReference<>(new StatusInfo(UNHEALTHY, "unhealthy-info"));
         JoinHelper joinHelper = new JoinHelper(
@@ -282,6 +292,7 @@
             null,
             null,
             transportService,
+            buildRemoteStoreNodeService(transportService, deterministicTaskQueue.getThreadPool()),
             () -> 0L,
             () -> null,
             (joinRequest, joinCallback) -> {
@@ -464,7 +475,8 @@ private TestClusterSetup getTestClusterSetup(Version version, boolean isCapturin
                 TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                 x -> localNode,
                 null,
-                Collections.emptySet()
+                Collections.emptySet(),
+                NoopTracer.INSTANCE
             );
         } else {
             transportService = mockTransport.createTransportService(
@@ -473,7 +485,8 @@ private TestClusterSetup getTestClusterSetup(Version version, boolean isCapturin
                 TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                 x -> localNode,
                 null,
-                Collections.emptySet()
+                Collections.emptySet(),
+                NoopTracer.INSTANCE
             );
         }
         JoinHelper joinHelper = new JoinHelper(
@@ -481,6 +494,7 @@
             null,
             null,
             transportService,
+            buildRemoteStoreNodeService(transportService, deterministicTaskQueue.getThreadPool()),
             () -> 0L,
             () -> localClusterState,
             (joinRequest, joinCallback) -> {
@@ -500,6 +514,18 @@
         return new TestClusterSetup(deterministicTaskQueue, localNode, transportService, localClusterState, joinHelper, capturingTransport);
     }
 
+    private RemoteStoreNodeService buildRemoteStoreNodeService(TransportService transportService, ThreadPool threadPool) {
+        RepositoriesService repositoriesService = new RepositoriesService(
+            Settings.EMPTY,
+            mock(ClusterService.class),
+            transportService,
+            Collections.emptyMap(),
+            Collections.emptyMap(),
+            threadPool
+        );
+        return new RemoteStoreNodeService(new SetOnce<>(repositoriesService)::get, threadPool);
+    }
+
     private static class TestClusterSetup {
         public final DeterministicTaskQueue deterministicTaskQueue;
         public final DiscoveryNode localNode;
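The JoinTaskExecutorTests diff that follows exercises the reworked join validation: ensureNodesCompatibility(...) now receives the joining DiscoveryNode and the cluster Metadata, so remote-store attribute mismatches can veto a join, and JoinTaskExecutor's constructor gains a RemoteStoreNodeService. A minimal sketch of the check, assuming the helpers defined in that test class (newDiscoveryNode, remoteStoreNodeAttributes, SEGMENT_REPO, TRANSLOG_REPO):

    // Passes when the joining node's remote-store attributes match the cluster;
    // throws IllegalStateException for remote/non-remote mixes, per the tests below.
    DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO));
    ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT)
        .nodes(DiscoveryNodes.builder().add(joiningNode).build())
        .build();
    JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata());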
expectThrows(IllegalStateException.class, () -> { if (randomBoolean()) { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, nodes); + JoinTaskExecutor.ensureNodesCompatibility(tooLowJoiningNode, nodes, metadata); } else { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, minNodeVersion, maxNodeVersion); + JoinTaskExecutor.ensureNodesCompatibility(tooLowJoiningNode, nodes, metadata, minNodeVersion, maxNodeVersion); } }); } @@ -133,11 +153,11 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { // we have to stick with the same major minNodeVersion : maxNodeVersion.minimumCompatibilityVersion(); final Version justGood = randomVersionBetween(random(), minGoodVersion, maxCompatibleVersion(minNodeVersion)); - + final DiscoveryNode justGoodJoiningNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), justGood); if (randomBoolean()) { - JoinTaskExecutor.ensureNodesCompatibility(justGood, nodes); + JoinTaskExecutor.ensureNodesCompatibility(justGoodJoiningNode, nodes, metadata); } else { - JoinTaskExecutor.ensureNodesCompatibility(justGood, minNodeVersion, maxNodeVersion); + JoinTaskExecutor.ensureNodesCompatibility(justGoodJoiningNode, nodes, metadata, minNodeVersion, maxNodeVersion); } } @@ -173,8 +193,17 @@ public void testUpdatesNodeWithNewRoles() throws Exception { final AllocationService allocationService = mock(AllocationService.class); when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + final RemoteStoreNodeService remoteStoreNodeService = mock(RemoteStoreNodeService.class); + when(remoteStoreNodeService.updateRepositoriesMetadata(any(), any())).thenReturn(new RepositoriesMetadata(Collections.emptyList())); - final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService, null); + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + null, + remoteStoreNodeService + ); final DiscoveryNode clusterManagerNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); @@ -236,6 +265,7 @@ public void testUpdatesNodeWithOpenSearchVersionForExistingAndNewNodes() throws channelVersions.put(node_6, LegacyESVersion.V_7_10_0); final TransportService transportService = mock(TransportService.class); + final RemoteStoreNodeService remoteStoreNodeService = mock(RemoteStoreNodeService.class); when(transportService.getChannelVersion(any())).thenReturn(channelVersions); DiscoveryNodes.Builder nodes = new DiscoveryNodes.Builder().localNodeId(node_1); nodes.add(new DiscoveryNode(node_1, buildNewFakeTransportAddress(), LegacyESVersion.V_7_10_2)); @@ -251,7 +281,8 @@ public void testUpdatesNodeWithOpenSearchVersionForExistingAndNewNodes() throws allocationService, logger, rerouteService, - transportService + transportService, + remoteStoreNodeService ); final DiscoveryNode existing_node_3 = clusterState.nodes().get(node_3); final DiscoveryNode node_3_new_join = new DiscoveryNode( @@ -346,8 +377,16 @@ public void testJoinFailedForDecommissionedNode() throws Exception { final AllocationService allocationService = mock(AllocationService.class); when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); final RerouteService rerouteService = (reason, priority, listener) -> 
listener.onResponse(null); + final RemoteStoreNodeService remoteStoreNodeService = mock(RemoteStoreNodeService.class); - final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService, null); + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + null, + remoteStoreNodeService + ); final DiscoveryNode clusterManagerNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); @@ -402,6 +441,414 @@ public void testJoinClusterWithDecommissionFailed() { JoinTaskExecutor.ensureNodeCommissioned(discoveryNode, metadata); } + public void testJoinClusterWithNonRemoteStoreNodeJoining() { + DiscoveryNode joiningNode = newDiscoveryNode(Collections.emptyMap()); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(joiningNode).build()) + .build(); + + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testJoinClusterWithRemoteStoreNodeJoining() { + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(joiningNode).build()) + .build(); + + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testJoinClusterWithNonRemoteStoreNodeJoiningNonRemoteStoreCluster() { + final DiscoveryNode existingNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(Collections.emptyMap()); + + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testPreventJoinClusterWithRemoteStoreNodeJoiningNonRemoteStoreCluster() { + final DiscoveryNode existingNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + Exception e = assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()) + ); + assertTrue(e.getMessage().equals("a remote store node [" + joiningNode + "] is trying to join a non remote " + "store cluster")); + } + + public void testJoinClusterWithRemoteStoreNodeJoiningRemoteStoreCluster() { + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, 
currentState.getNodes(), currentState.metadata()); + } + + public void testPreventJoinClusterWithRemoteStoreNodeWithDifferentAttributesJoiningRemoteStoreCluster() { + Map existingNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + Map remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + existingNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + for (Map.Entry nodeAttribute : existingNodeAttributes.entrySet()) { + if (nodeAttribute.getKey() != REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY + && nodeAttribute.getKey() != REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY + && nodeAttribute.getKey() != REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY) { + remoteStoreNodeAttributes.put(nodeAttribute.getKey(), nodeAttribute.getValue() + "-new"); + validateAttributes(remoteStoreNodeAttributes, currentState, existingNode); + remoteStoreNodeAttributes.put(nodeAttribute.getKey(), nodeAttribute.getValue()); + } + } + } + + public void testPreventJoinClusterWithRemoteStoreNodeWithDifferentNameAttributesJoiningRemoteStoreCluster() { + Map existingNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + existingNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + for (Map.Entry nodeAttribute : existingNodeAttributes.entrySet()) { + if (REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY.equals(nodeAttribute.getKey())) { + Map remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO + "new", TRANSLOG_REPO); + validateAttributes(remoteStoreNodeAttributes, currentState, existingNode); + } else if (REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY.equals(nodeAttribute.getKey())) { + Map remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO + "new"); + validateAttributes(remoteStoreNodeAttributes, currentState, existingNode); + } else if (REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY.equals(nodeAttribute.getKey())) { + Map remoteStoreNodeAttributes = remoteStoreNodeAttributes( + SEGMENT_REPO, + TRANSLOG_REPO, + CLUSTER_STATE_REPO + "new" + ); + validateAttributes(remoteStoreNodeAttributes, currentState, existingNode); + } + } + } + + public void testPreventJoinClusterWithNonRemoteStoreNodeJoiningRemoteStoreCluster() { + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(Collections.emptyMap()); + Exception e = assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), 
currentState.metadata()) + ); + assertTrue(e.getMessage().equals("a non remote store node [" + joiningNode + "] is trying to join a remote " + "store cluster")); + } + + public void testPreventJoinClusterWithRemoteStoreNodeWithPartialAttributesJoiningRemoteStoreCluster() { + Map existingNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + Map remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + existingNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + for (Map.Entry nodeAttribute : existingNodeAttributes.entrySet()) { + remoteStoreNodeAttributes.put(nodeAttribute.getKey(), null); + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes); + Exception e = assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()) + ); + assertTrue( + e.getMessage().equals("joining node [" + joiningNode + "] doesn't have the node attribute [" + nodeAttribute.getKey() + "]") + ); + + remoteStoreNodeAttributes.put(nodeAttribute.getKey(), nodeAttribute.getValue()); + } + } + + public void testUpdatesClusterStateWithSingleNodeCluster() throws Exception { + Map remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService( + new SetOnce<>(mock(RepositoriesService.class))::get, + null + ); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + null, + remoteStoreNodeService + ); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + .build(); + + final ClusterStateTaskExecutor.ClusterTasksResult result = joinTaskExecutor.execute( + clusterState, + List.of(new JoinTaskExecutor.Task(clusterManagerNode, "elect leader")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertTrue(taskResult.isSuccess()); + validateRepositoryMetadata(result.resultingState, clusterManagerNode, 3); + } + + public void testUpdatesClusterStateWithMultiNodeCluster() throws Exception { + Map remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + 
+ + public void testUpdatesClusterStateWithMultiNodeCluster() throws Exception { + Map<String, String> remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService( + new SetOnce<>(mock(RepositoriesService.class))::get, + null + ); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + null, + remoteStoreNodeService + ); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final RepositoryMetadata segmentRepositoryMetadata = buildRepositoryMetadata(clusterManagerNode, SEGMENT_REPO); + final RepositoryMetadata translogRepositoryMetadata = buildRepositoryMetadata(clusterManagerNode, TRANSLOG_REPO); + List<RepositoryMetadata> repositoriesMetadata = new ArrayList<>() { + { + add(segmentRepositoryMetadata); + add(translogRepositoryMetadata); + } + }; + + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + .metadata(Metadata.builder().putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata))) + .build(); + + final DiscoveryNode joiningNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final ClusterStateTaskExecutor.ClusterTasksResult<JoinTaskExecutor.Task> result = joinTaskExecutor.execute( + clusterState, + List.of(new JoinTaskExecutor.Task(joiningNode, "test")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertTrue(taskResult.isSuccess()); + validateRepositoryMetadata(result.resultingState, clusterManagerNode, 3); + }
+ + public void testUpdatesClusterStateWithSingleNodeClusterAndSameRepository() throws Exception { + Map<String, String> remoteStoreNodeAttributes = remoteStoreNodeAttributes(COMMON_REPO, COMMON_REPO); + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService( + new SetOnce<>(mock(RepositoriesService.class))::get, + null + ); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + null, + remoteStoreNodeService + ); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + .build(); + + final ClusterStateTaskExecutor.ClusterTasksResult<JoinTaskExecutor.Task> result = joinTaskExecutor.execute( + clusterState, + List.of(new JoinTaskExecutor.Task(clusterManagerNode, "elect leader")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertTrue(taskResult.isSuccess()); + validateRepositoryMetadata(result.resultingState, clusterManagerNode, 2); + } + + public void testUpdatesClusterStateWithMultiNodeClusterAndSameRepository() throws Exception { + Map<String, String> remoteStoreNodeAttributes = remoteStoreNodeAttributes(COMMON_REPO, COMMON_REPO); + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService( + new SetOnce<>(mock(RepositoriesService.class))::get, + null + ); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + null, + remoteStoreNodeService + ); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final RepositoryMetadata segmentRepositoryMetadata = buildRepositoryMetadata(clusterManagerNode, COMMON_REPO); + List<RepositoryMetadata> repositoriesMetadata = new ArrayList<>() { + { + add(segmentRepositoryMetadata); + } + }; + + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + .metadata(Metadata.builder().putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata))) + .build(); + + final DiscoveryNode joiningNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final ClusterStateTaskExecutor.ClusterTasksResult<JoinTaskExecutor.Task> result = joinTaskExecutor.execute( + clusterState, + List.of(new JoinTaskExecutor.Task(joiningNode, "test")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertTrue(taskResult.isSuccess()); + validateRepositoryMetadata(result.resultingState, clusterManagerNode, 2); + }
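The two SameRepository tests flip the expectation from three repositories to two because the segment and translog roles now point at the same name (COMMON_REPO), and each distinct name is only registered once. A tiny sketch of that dedup step, using set semantics:

import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

final class RepoDedupSketch {
    // LinkedHashSet drops duplicate names while preserving insertion order.
    static Set<String> uniqueRepoNames(String segmentRepo, String translogRepo, String stateRepo) {
        return new LinkedHashSet<>(List.of(segmentRepo, translogRepo, stateRepo));
    }
}

With this sketch, uniqueRepoNames("remote-repo", "remote-repo", "cluster-state-repo") yields two entries, matching the expectedRepositories of 2 asserted above.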
+ + private void validateRepositoryMetadata(ClusterState updatedState, DiscoveryNode existingNode, int expectedRepositories) + throws Exception { + + final RepositoriesMetadata repositoriesMetadata = updatedState.metadata().custom(RepositoriesMetadata.TYPE); + assertTrue(repositoriesMetadata.repositories().size() == expectedRepositories); + if (repositoriesMetadata.repositories().size() == 2 || repositoriesMetadata.repositories().size() == 3) { + final RepositoryMetadata segmentRepositoryMetadata = buildRepositoryMetadata(existingNode, SEGMENT_REPO); + final RepositoryMetadata translogRepositoryMetadata = buildRepositoryMetadata(existingNode, TRANSLOG_REPO); + for (RepositoryMetadata repositoryMetadata : repositoriesMetadata.repositories()) { + if (repositoryMetadata.name().equals(segmentRepositoryMetadata.name())) { + assertTrue(segmentRepositoryMetadata.equalsIgnoreGenerations(repositoryMetadata)); + } else if (repositoryMetadata.name().equals(translogRepositoryMetadata.name())) { + assertTrue(translogRepositoryMetadata.equalsIgnoreGenerations(repositoryMetadata)); + } else if (repositoriesMetadata.repositories().size() == 3) { + final RepositoryMetadata clusterStateRepoMetadata = buildRepositoryMetadata(existingNode, CLUSTER_STATE_REPO); + assertTrue(clusterStateRepoMetadata.equalsIgnoreGenerations(repositoryMetadata)); + } + } + } else if (repositoriesMetadata.repositories().size() == 1) { + final RepositoryMetadata repositoryMetadata = buildRepositoryMetadata(existingNode, COMMON_REPO); + assertTrue(repositoryMetadata.equalsIgnoreGenerations(repositoriesMetadata.repositories().get(0))); + } else { + throw new Exception("unexpected repository count [" + repositoriesMetadata.repositories().size() + "] in cluster state"); + } + } + private DiscoveryNode newDiscoveryNode(Map<String, String> attributes) { return new DiscoveryNode( randomAlphaOfLength(10), @@ -412,4 +859,99 @@ private DiscoveryNode newDiscoveryNode(Map<String, String> attributes) { Version.CURRENT ); } + + private static final String SEGMENT_REPO = "segment-repo"; + private static final String TRANSLOG_REPO = "translog-repo"; + private static final String CLUSTER_STATE_REPO = "cluster-state-repo"; + private static final String COMMON_REPO = "remote-repo"; + + private Map<String, String> remoteStoreNodeAttributes(String segmentRepoName, String translogRepoName) { + return remoteStoreNodeAttributes(segmentRepoName, translogRepoName, CLUSTER_STATE_REPO); + } + + private Map<String, String> remoteStoreNodeAttributes(String segmentRepoName, String translogRepoName, String clusterStateRepo) { + String segmentRepositoryTypeAttributeKey = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + segmentRepoName + ); + String segmentRepositorySettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + segmentRepoName + ); + String translogRepositoryTypeAttributeKey = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + translogRepoName + ); + String translogRepositorySettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + translogRepoName + ); + String clusterStateRepositoryTypeAttributeKey = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + clusterStateRepo + ); + String clusterStateRepositorySettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + clusterStateRepo + ); + + return new HashMap<>() { + { + put(REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName); + put(segmentRepositoryTypeAttributeKey, "s3"); + put(segmentRepositorySettingsAttributeKeyPrefix + "bucket", "segment_bucket"); + put(segmentRepositorySettingsAttributeKeyPrefix + "base_path", "/segment/path"); + put(REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName); + putIfAbsent(translogRepositoryTypeAttributeKey, "s3"); + putIfAbsent(translogRepositorySettingsAttributeKeyPrefix + "bucket", "translog_bucket"); + putIfAbsent(translogRepositorySettingsAttributeKeyPrefix + "base_path", "/translog/path"); + put(REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, clusterStateRepo); + putIfAbsent(clusterStateRepositoryTypeAttributeKey, "s3"); + putIfAbsent(clusterStateRepositorySettingsAttributeKeyPrefix + "bucket", "state_bucket"); + putIfAbsent(clusterStateRepositorySettingsAttributeKeyPrefix + "base_path", "/state/path"); + } + }; + }
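For a quick mental model of what the helper above produces: each repository contributes a name attribute plus type and settings attributes built through the two format constants. The sketch below assumes key formats along the lines of remote_store.repository.%s.type and remote_store.repository.%s.settings. (the literals are illustrative; the real values live in RemoteStoreNodeAttribute):

import java.util.Locale;

final class AttributeKeySketch {
    public static void main(String[] args) {
        String repoName = "segment-repo";
        // Assumed key formats standing in for REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT
        // and REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX.
        String typeKey = String.format(Locale.ROOT, "remote_store.repository.%s.type", repoName);
        String settingsPrefix = String.format(Locale.ROOT, "remote_store.repository.%s.settings.", repoName);
        System.out.println(typeKey);                   // remote_store.repository.segment-repo.type
        System.out.println(settingsPrefix + "bucket"); // remote_store.repository.segment-repo.settings.bucket
    }
}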
+ + private void validateAttributes(Map<String, String> remoteStoreNodeAttributes, ClusterState currentState, DiscoveryNode existingNode) { + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes); + Exception e = assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()) + ); + assertEquals( + e.getMessage(), + "a remote store node [" + + joiningNode + + "] is trying to join a remote store cluster with incompatible node attributes in " + + "comparison with existing node [" + + existingNode + + "]" + ); + } + + private RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name) { + Map<String, String> nodeAttributes = node.getAttributes(); + String type = nodeAttributes.get(String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, name)); + + String settingsAttributeKeyPrefix = String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, name); + Map<String, String> settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> node.getAttributes().get(key))); + + Settings.Builder settings = Settings.builder(); + settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue())); + + settings.put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true); + + return new RepositoryMetadata(name, type, settings.build()); + } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java index 180d0ffe649e2..8915f4c5c1274 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java @@ -43,6 +43,7 @@ import org.opensearch.core.transport.TransportResponse; import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.monitor.StatusInfo; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.EqualsHashCodeTestUtils; import org.opensearch.test.EqualsHashCodeTestUtils.CopyFunction; import org.opensearch.test.OpenSearchTestCase; @@ -165,7 +166,8 @@ public String toString() { NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -281,7 +283,8 @@ public String toString() { NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -393,7 +396,8 @@ public String toString() { NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -438,7 +442,8 @@ public void testLeaderBehaviour() { NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index ab91099cae11f..d94f3fb304fe2 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++
b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -38,6 +38,7 @@ import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.decommission.DecommissionAttribute; import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; import org.opensearch.cluster.decommission.DecommissionStatus; @@ -59,6 +60,8 @@ import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; @@ -89,6 +92,8 @@ import java.util.stream.IntStream; import java.util.stream.Stream; +import org.mockito.Mockito; + import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.singleton; @@ -243,8 +248,11 @@ protected void onSendRequest( TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> initialState.nodes().getLocalNode(), clusterSettings, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, new InMemoryPersistedState(term, initialState)); coordinator = new Coordinator( "test_node", Settings.EMPTY, @@ -253,14 +261,16 @@ protected void onSendRequest( writableRegistry(), OpenSearchAllocationTestCase.createAllocationService(Settings.EMPTY), clusterManagerService, - () -> new InMemoryPersistedState(term, initialState), + () -> persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL), r -> emptyList(), new NoOpClusterApplier(), Collections.emptyList(), random, (s, p, r) -> {}, ElectionStrategy.DEFAULT_INSTANCE, - nodeHealthService + nodeHealthService, + persistedStateRegistry, + Mockito.mock(RemoteStoreNodeService.class) ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/PreVoteCollectorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/PreVoteCollectorTests.java index 8b35856f0fb4c..5ddf614db3334 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/PreVoteCollectorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/PreVoteCollectorTests.java @@ -35,11 +35,13 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.monitor.StatusInfo; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.transport.ConnectTransportException; @@ -135,7 +137,8 @@ public void handleRemoteError(long requestId, 
Throwable t) { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -290,10 +293,16 @@ public void testPrevotingIndicatesElectionSuccess() { DiscoveryNode[] votingNodes = votingNodesSet.toArray(new DiscoveryNode[0]); startAndRunCollector(votingNodes); + PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + persistedStateRegistry.addPersistedState( + PersistedStateType.LOCAL, + new InMemoryPersistedState(currentTerm, makeClusterState(votingNodes)) + ); final CoordinationState coordinationState = new CoordinationState( localNode, - new InMemoryPersistedState(currentTerm, makeClusterState(votingNodes)), - ElectionStrategy.DEFAULT_INSTANCE + persistedStateRegistry, + ElectionStrategy.DEFAULT_INSTANCE, + Settings.EMPTY ); final long newTerm = randomLongBetween(currentTerm + 1, Long.MAX_VALUE); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java index 79c141aa69b9f..4d18ff95887dd 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java @@ -35,6 +35,7 @@ import org.opensearch.Version; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; @@ -91,11 +92,9 @@ class MockNode { CoordinationMetadata.VotingConfiguration.EMPTY_CONFIG, 0L ); - coordinationState = new CoordinationState( - localNode, - new InMemoryPersistedState(0L, initialState), - ElectionStrategy.DEFAULT_INSTANCE - ); + PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, new InMemoryPersistedState(0L, initialState)); + coordinationState = new CoordinationState(localNode, persistedStateRegistry, ElectionStrategy.DEFAULT_INSTANCE, Settings.EMPTY); } final DiscoveryNode localNode; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTransportHandlerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTransportHandlerTests.java index 2ff78d3b68082..6d94054afdea2 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTransportHandlerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTransportHandlerTests.java @@ -43,6 +43,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.node.Node; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.transport.TransportService; @@ -68,7 +69,8 @@ public void testDiffSerializationFailure() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> localNode, clusterSettings, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); final PublicationTransportHandler handler = new PublicationTransportHandler( transportService, diff --git 
a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java index 7d50ab5dfeb1b..627f31502a417 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java @@ -28,6 +28,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.action.ActionListener; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.TestThreadPool; @@ -91,7 +92,8 @@ public void setTransportServiceAndDefaultClusterState() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> clusterService.state().nodes().get("node1"), null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); final Settings.Builder nodeSettingsBuilder = Settings.builder(); diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java index 992d35290652e..3ead45a0f42fe 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java @@ -28,6 +28,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; @@ -103,7 +104,8 @@ public void setUpService() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> clusterService.state().nodes().get("node1"), null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); final Settings.Builder nodeSettingsBuilder = Settings.builder() diff --git a/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java b/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java index c1c14188db97c..795dc8a624e38 100644 --- a/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java +++ b/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java @@ -59,6 +59,7 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.gateway.TestGatewayAllocator; import org.opensearch.test.transport.CapturingTransport; @@ -117,7 +118,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 73a20eaf4a1fb..91f1e8bf29988 100644 --- 
a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -62,7 +62,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -72,6 +71,8 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.translog.Translog; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidAliasNameException; import org.opensearch.indices.InvalidIndexNameException; import org.opensearch.indices.ShardLimitValidator; @@ -80,13 +81,13 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.ClusterServiceUtils; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.test.gateway.TestGatewayAllocator; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.hamcrest.Matchers; +import org.junit.After; import org.junit.Before; import java.io.IOException; @@ -131,13 +132,19 @@ import static org.opensearch.cluster.metadata.MetadataCreateIndexService.getIndexNumberOfRoutingShards; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.parseV1Mappings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.resolveAndValidateAliases; +import static org.opensearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; +import static org.opensearch.index.IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.ShardLimitValidatorTests.createTestShardLimitService; +import static org.opensearch.node.Node.NODE_ATTRIBUTES; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; @@ -154,6 +161,17 
@@ public class MetadataCreateIndexServiceTests extends OpenSearchTestCase { private AliasValidator aliasValidator; private CreateIndexClusterStateUpdateRequest request; private QueryShardContext queryShardContext; + private ClusterSettings clusterSettings; + private static final String segmentRepositoryNameAttributeKey = NODE_ATTRIBUTES.getKey() + + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; + private static final String translogRepositoryNameAttributeKey = NODE_ATTRIBUTES.getKey() + + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; + + @Before + public void setup() throws Exception { + super.setUp(); + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + } @Before public void setupCreateIndexRequestAndAliasValidator() { @@ -827,7 +845,8 @@ public void testAggregateSettingsAppliesSettingsFromTemplatesAndRequest() { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); assertThat(aggregatedIndexSettings.get("template_setting"), equalTo("value1")); @@ -889,7 +908,8 @@ public void testRequestDataHavePriorityOverTemplateData() throws Exception { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); assertThat(resolvedAliases.get(0).getSearchRouting(), equalTo("fromRequest")); @@ -911,7 +931,8 @@ public void testDefaultSettings() { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); assertThat(aggregatedIndexSettings.get(SETTING_NUMBER_OF_SHARDS), equalTo("1")); @@ -926,7 +947,8 @@ public void testSettingsFromClusterState() { Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 15).build(), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); assertThat(aggregatedIndexSettings.get(SETTING_NUMBER_OF_SHARDS), equalTo("15")); @@ -963,7 +985,8 @@ public void testTemplateOrder() throws Exception { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); List resolvedAliases = resolveAndValidateAliases( request.index(), @@ -1002,7 +1025,8 @@ public void testAggregateIndexSettingsIgnoresTemplatesOnCreateFromSourceIndex() Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); assertThat(aggregatedIndexSettings.get("templateSetting"), is(nullValue())); @@ -1163,6 +1187,8 @@ public void testvalidateIndexSettings() { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a, b") .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "rack.values", "c, d, e") .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) + .put(CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey(), true) + .put(SETTING_REPLICATION_TYPE, randomFrom(ReplicationType.values())) .build(); ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); when(clusterService.getSettings()).thenReturn(settings); @@ -1186,8 +1212,12 @@ public void testvalidateIndexSettings() { ); List 
validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); - assertThat(validationErrors.size(), is(1)); - assertThat(validationErrors.get(0), is("expected total copies needs to be a multiple of total awareness attributes [3]")); + assertThat(validationErrors.size(), is(2)); + assertThat( + validationErrors.get(0), + is("index setting [index.replication.type] is not allowed to be set as [cluster.restrict.index.replication_type=true]") + ); + assertThat(validationErrors.get(1), is("expected total copies needs to be a multiple of total awareness attributes [3]")); settings = Settings.builder() .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone, rack") @@ -1195,8 +1225,13 @@ public void testvalidateIndexSettings() { .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "rack.values", "c, d, e") .put(AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING.getKey(), true) .put(SETTING_NUMBER_OF_REPLICAS, 2) + .put(CLUSTER_RESTRICT_INDEX_REPLICATION_TYPE_SETTING.getKey(), false) + .put(SETTING_REPLICATION_TYPE, randomFrom(ReplicationType.values())) .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + validationErrors = checkerService.getIndexSettingsValidationErrors(settings, false, Optional.empty()); assertThat(validationErrors.size(), is(0)); @@ -1206,11 +1241,9 @@ public void testvalidateIndexSettings() { public void testRemoteStoreNoUserOverrideExceptReplicationTypeSegmentIndexSettings() { Settings settings = Settings.builder() .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") + .put(segmentRepositoryNameAttributeKey, "my-segment-repo-1") + .put(translogRepositoryNameAttributeKey, "my-translog-repo-1") .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); final Settings.Builder requestSettings = Settings.builder(); @@ -1224,7 +1257,38 @@ public void testRemoteStoreNoUserOverrideExceptReplicationTypeSegmentIndexSettin settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings + ); + verifyRemoteStoreIndexSettings( + indexSettings, + "true", + "my-segment-repo-1", + "my-translog-repo-1", + ReplicationType.SEGMENT.toString(), + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + ); + } + + public void testRemoteStoreImplicitOverrideReplicationTypeToSegmentForRemoteStore() { + Settings settings = Settings.builder() + .put(segmentRepositoryNameAttributeKey, "my-segment-repo-1") + .put(translogRepositoryNameAttributeKey, "my-translog-repo-1") + .build(); + + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + request.settings(requestSettings.build()); + Settings indexSettings = aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + settings, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + 
clusterSettings ); verifyRemoteStoreIndexSettings( indexSettings, @@ -1239,11 +1303,9 @@ public void testRemoteStoreNoUserOverrideExceptReplicationTypeSegmentIndexSettin public void testRemoteStoreNoUserOverrideIndexSettings() { Settings settings = Settings.builder() .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") + .put(segmentRepositoryNameAttributeKey, "my-segment-repo-1") + .put(translogRepositoryNameAttributeKey, "my-translog-repo-1") .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); Settings indexSettings = aggregateIndexSettings( @@ -1254,7 +1316,8 @@ public void testRemoteStoreNoUserOverrideIndexSettings() { settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); verifyRemoteStoreIndexSettings( indexSettings, @@ -1294,7 +1357,7 @@ public void testRemoteStoreDisabledByUserIndexSettings() { assertThat(validationErrors.size(), is(1)); assertThat( validationErrors.get(0), - is(String.format(Locale.ROOT, "expected [%s] to be private but it was not", SETTING_REMOTE_STORE_ENABLED)) + is(String.format(Locale.ROOT, "private index setting [%s] can not be set explicitly", SETTING_REMOTE_STORE_ENABLED)) ); })); } @@ -1328,7 +1391,13 @@ public void testRemoteStoreOverrideSegmentRepoIndexSettings() { assertThat(validationErrors.size(), is(1)); assertThat( validationErrors.get(0), - is(String.format(Locale.ROOT, "expected [%s] to be private but it was not", SETTING_REMOTE_SEGMENT_STORE_REPOSITORY)) + is( + String.format( + Locale.ROOT, + "private index setting [%s] can not be set explicitly", + SETTING_REMOTE_SEGMENT_STORE_REPOSITORY + ) + ) ); })); } @@ -1361,7 +1430,13 @@ public void testRemoteStoreOverrideTranslogRepoIndexSettings() { assertThat(validationErrors.size(), is(1)); assertThat( validationErrors.get(0), - is(String.format(Locale.ROOT, "expected [%s] to be private but it was not", SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY)) + is( + String.format( + Locale.ROOT, + "private index setting [%s] can not be set explicitly", + SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY + ) + ) ); })); } @@ -1435,7 +1510,8 @@ public void testSoftDeletesDisabledIsRejected() { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); }); assertThat( @@ -1464,7 +1540,8 @@ public void testValidateTranslogRetentionSettings() { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); assertWarnings( "Translog retention settings [index.translog.retention.age] " @@ -1511,7 +1588,8 @@ public void testDeprecatedSimpleFSStoreSettings() { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); assertWarnings( "[simplefs] is deprecated and will be removed in 2.0. 
Use [niofs], which offers equal " @@ -1530,7 +1608,8 @@ public void testClusterReplicationSetting() { settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(SETTING_REPLICATION_TYPE)); } @@ -1550,12 +1629,208 @@ public void testIndexSettingOverridesClusterReplicationSetting() { settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet() + Collections.emptySet(), + clusterSettings ); // Verify if index setting overrides cluster replication setting assertEquals(ReplicationType.DOCUMENT.toString(), indexSettings.get(SETTING_REPLICATION_TYPE)); } + public void testRefreshIntervalValidationWithNoIndexSetting() { + // This checks that aggregateIndexSetting works for the case where there are no index setting + // `index.refresh_interval` in the cluster state update request. + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + } + + public void testRefreshIntervalValidationSuccessWithIndexSettingEqualToClusterMinimum() { + // This checks that aggregateIndexSettings works for the case when the index setting `index.refresh_interval` + // is set to a value that is equal to the `cluster.default.index.refresh_interval` value. + TimeValue refreshInterval = TimeValue.timeValueSeconds(10); + Settings settings = Settings.builder() + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), refreshInterval) + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), refreshInterval) + .build(); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + // Set index setting refresh interval the same value as the cluster minimum refresh interval + requestSettings.put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), refreshInterval); + request.settings(requestSettings.build()); + Settings indexSettings = aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + settings, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + // Verify that the value is the same as set as earlier and the validation was successful + assertEquals(refreshInterval, INDEX_REFRESH_INTERVAL_SETTING.get(indexSettings)); + } + + public void testRefreshIntervalValidationSuccessWithIndexSettingGreaterThanClusterMinimum() { + // This checks that aggregateIndexSettings works for the case when the index setting `index.refresh_interval` + // is set to a value that is greater than the `cluster.default.index.refresh_interval` value. 
+ int clusterMinRefreshTimeMs = 10 * 1000; + TimeValue clusterMinRefreshTime = TimeValue.timeValueMillis(clusterMinRefreshTimeMs); + Settings settings = Settings.builder() + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), clusterMinRefreshTime) + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), clusterMinRefreshTime) + .build(); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + // Set the index refresh interval to a value greater than the cluster minimum refresh interval + TimeValue indexRefreshTime = TimeValue.timeValueMillis(clusterMinRefreshTimeMs + randomNonNegativeLong()); + requestSettings.put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), indexRefreshTime); + request.settings(requestSettings.build()); + Settings indexSettings = aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + settings, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + // Verify that the value is the same as set earlier and that the validation was successful + assertEquals(indexRefreshTime, INDEX_REFRESH_INTERVAL_SETTING.get(indexSettings)); + } + + public void testRefreshIntervalValidationFailureWithIndexSetting() { + // This checks that aggregateIndexSettings fails for the case when the index setting `index.refresh_interval` + // is set to a value that is below the `cluster.minimum.index.refresh_interval` value. + int clusterMinRefreshTimeMs = 10 * 1000; + TimeValue clusterMinRefreshTime = TimeValue.timeValueMillis(clusterMinRefreshTimeMs); + Settings settings = Settings.builder() + .put(CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING.getKey(), clusterMinRefreshTime) + .put(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.getKey(), clusterMinRefreshTime) + .build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + // Set the index refresh interval to a value smaller than the cluster minimum refresh interval + TimeValue indexRefreshTime = TimeValue.timeValueMillis(clusterMinRefreshTimeMs - randomIntBetween(1, clusterMinRefreshTimeMs - 1)); + requestSettings.put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), indexRefreshTime); + request.settings(requestSettings.build()); + IllegalArgumentException exception = assertThrows( + IllegalArgumentException.class, + () -> aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ) + ); + // verify that the message is as expected + assertEquals( + "invalid index.refresh_interval [" + + indexRefreshTime + + "]: cannot be smaller than cluster.minimum.index.refresh_interval [10s]", + exception.getMessage() + ); + }
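The refresh-interval tests above pin the validation rule down from both sides: an index refresh interval at or above the cluster minimum passes, and anything below it throws. The check reduces to a single comparison; here is a dependency-free sketch (the real validation operates on TimeValue inside the index settings aggregation, and the names below are invented):

final class RefreshIntervalCheckSketch {
    static void validate(long indexIntervalMs, long clusterMinimumMs) {
        // Mirrors the failure message asserted in the test above.
        if (indexIntervalMs < clusterMinimumMs) {
            throw new IllegalArgumentException(
                "invalid index.refresh_interval [" + indexIntervalMs
                    + "ms]: cannot be smaller than cluster.minimum.index.refresh_interval [" + clusterMinimumMs + "ms]"
            );
        }
    }
}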
+ + public void testAnyTranslogDurabilityWhenRestrictSettingFalse() { + // This checks that aggregateIndexSettings works for the case when the cluster setting + // cluster.remote_store.index.restrict.async-durability is false or not set; all durability modes are allowed + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + Translog.Durability durability = randomFrom(Translog.Durability.values()); + requestSettings.put(INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability); + request.settings(requestSettings.build()); + if (randomBoolean()) { + Settings settings = Settings.builder().put(CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), false).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + } + Settings indexSettings = aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + assertFalse(clusterSettings.get(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING)); + assertEquals(durability, INDEX_TRANSLOG_DURABILITY_SETTING.get(indexSettings)); + } + + public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() { + // This checks that aggregateIndexSettings rejects the async durability mode when the cluster setting + // cluster.remote_store.index.restrict.async-durability is true + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + requestSettings.put(INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC); + request.settings(requestSettings.build()); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), true).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + IllegalArgumentException exception = assertThrows( + IllegalArgumentException.class, + () -> aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.builder().put("node.attr.remote_store.setting", "test").build(), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ) + ); + // verify that the message is as expected + assertEquals( + "index setting [index.translog.durability=async] is not allowed as cluster setting [cluster.remote_store.index.restrict.async-durability=true]", + exception.getMessage() + ); + } + + public void testRequestDurabilityWhenRestrictSettingTrue() { + // This checks that aggregateIndexSettings still allows the request durability mode when the cluster setting + // cluster.remote_store.index.restrict.async-durability is true + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + final Settings.Builder requestSettings = Settings.builder(); + requestSettings.put(INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST); + request.settings(requestSettings.build()); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING.getKey(), true).build(); + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + Settings indexSettings = aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + assertTrue(clusterSettings.get(IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING)); + assertEquals(Translog.Durability.REQUEST, INDEX_TRANSLOG_DURABILITY_SETTING.get(indexSettings)); + }
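Taken together, the three durability tests encode a single rule: cluster.remote_store.index.restrict.async-durability=true bans only the async mode, leaving request durability (and everything else) untouched. A minimal sketch of that rule, with an invented enum and method name:

final class DurabilityRestrictionSketch {
    enum Durability { REQUEST, ASYNC }

    static void validate(Durability requested, boolean restrictAsyncDurability) {
        // Only the async mode is rejected, and only when the restriction is on.
        if (restrictAsyncDurability && requested == Durability.ASYNC) {
            throw new IllegalArgumentException("index.translog.durability=async is not allowed when async durability is restricted");
        }
    }
}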
+ private IndexTemplateMetadata addMatchingTemplate(Consumer<IndexTemplateMetadata.Builder> configurator) { IndexTemplateMetadata.Builder builder = templateMetadataBuilder("template1", "te*"); configurator.accept(builder); @@ -1615,4 +1890,9 @@ private void verifyRemoteStoreIndexSettings( assertEquals(translogBufferInterval, INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(indexSettings)); } + @After + public void shutdown() throws Exception { + clusterSettings = null; + } + } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java index fff39d14e9702..618fcb923bc60 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataTests.java @@ -52,6 +52,7 @@ import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.MapperPlugin; import org.opensearch.test.OpenSearchTestCase; @@ -626,6 +627,39 @@ public void testGlobalStateEqualsCoordinationMetadata() { assertFalse(Metadata.isGlobalStateEquals(metadata1, metadata2)); } + public void testGlobalResourcesStateEqualsCoordinationMetadata() { + CoordinationMetadata coordinationMetadata1 = new CoordinationMetadata( + randomNonNegativeLong(), + randomVotingConfig(), + randomVotingConfig(), + randomVotingConfigExclusions() + ); + Metadata metadata1 = Metadata.builder() + .coordinationMetadata(coordinationMetadata1) + .clusterUUID(randomAlphaOfLength(10)) + .clusterUUIDCommitted(false) + .hashesOfConsistentSettings(Map.of("a", "b")) + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build(); + CoordinationMetadata coordinationMetadata2 = new CoordinationMetadata( + randomNonNegativeLong(), + randomVotingConfig(), + randomVotingConfig(), + randomVotingConfigExclusions() + ); + Metadata metadata2 = Metadata.builder() + .coordinationMetadata(coordinationMetadata2) + .clusterUUIDCommitted(true) + .clusterUUID(randomAlphaOfLength(11)) + .hashesOfConsistentSettings(Map.of("b", "a")) + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build(); + + assertTrue(Metadata.isGlobalStateEquals(metadata1, metadata1)); + assertFalse(Metadata.isGlobalStateEquals(metadata1, metadata2)); + assertTrue(Metadata.isGlobalResourcesMetadataEquals(metadata1, metadata2)); + } + public void testSerializationWithIndexGraveyard() throws IOException { final IndexGraveyard graveyard = IndexGraveyardTests.createRandom(); final Metadata originalMeta = Metadata.builder().indexGraveyard(graveyard).build(); @@ -1425,6 +1459,29 @@ public void testMetadataBuildInvocations() { compareMetadata(previousMetadata, builtMetadata, false, true, true); } + public void testIsSegmentReplicationEnabled() { + final String indexName = "test"; + Settings.Builder builder = settings(Version.CURRENT).put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName) + .settings(builder) + .numberOfShards(1) + .numberOfReplicas(1); + Metadata.Builder metadataBuilder = Metadata.builder().put(indexMetadataBuilder); + Metadata metadata = metadataBuilder.build(); + assertTrue(metadata.isSegmentReplicationEnabled(indexName)); + } + + public void testIsSegmentReplicationDisabled() { + final
String indexName = "test"; + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(1); + Metadata.Builder metadataBuilder = Metadata.builder().put(indexMetadataBuilder); + Metadata metadata = metadataBuilder.build(); + assertFalse(metadata.isSegmentReplicationEnabled(indexName)); + } + public static Metadata randomMetadata() { Metadata.Builder md = Metadata.builder() .put(buildIndexMetadata("index", "alias", randomBoolean() ? null : randomBoolean()).build(), randomBoolean()) diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java index c2e939bb2f4a5..8f149e32ec6f5 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java @@ -39,13 +39,16 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.test.NodeRoles; import org.opensearch.test.OpenSearchTestCase; import java.net.InetAddress; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.Locale; +import java.util.Map; import java.util.Set; import java.util.stream.Collectors; @@ -82,6 +85,22 @@ public void testRolesAreSorted() { } + public void testRemoteStoreRedactionInToString() { + final Set<DiscoveryNodeRole> roles = new HashSet<>(randomSubsetOf(DiscoveryNodeRole.BUILT_IN_ROLES)); + Map<String, String> attributes = new HashMap<>(); + attributes.put(RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test-repo"); + attributes.put(RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, "test-repo"); + final DiscoveryNode node = new DiscoveryNode( + "name", + "id", + new TransportAddress(TransportAddress.META_ADDRESS, 9200), + attributes, + roles, + Version.CURRENT + ); + assertFalse(node.toString().contains(RemoteStoreNodeAttribute.REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX)); + }
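testRemoteStoreRedactionInToString pins down a privacy detail: none of the remote-store attribute keys may leak into DiscoveryNode#toString. Filtering by key prefix is one way to get that behavior; the sketch below assumes a prefix literal of "remote_store" mirroring REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX, which is an assumption, not the confirmed constant value.

import java.util.Map;
import java.util.stream.Collectors;

final class AttributeRedactionSketch {
    static final String REMOTE_STORE_PREFIX = "remote_store"; // assumed literal

    // Drop every attribute whose key carries the remote-store prefix before rendering.
    static Map<String, String> redactForDisplay(Map<String, String> attributes) {
        return attributes.entrySet()
            .stream()
            .filter(entry -> !entry.getKey().startsWith(REMOTE_STORE_PREFIX))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    }
}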
+ public void testDiscoveryNodeIsCreatedWithHostFromInetAddress() throws Exception { InetAddress inetAddress = randomBoolean() ? InetAddress.getByName("192.0.2.1") diff --git a/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java b/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java index 6e7583fbd75d5..8542ff53c6ff1 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java @@ -50,13 +50,18 @@ import org.opensearch.repositories.IndexId; import org.junit.Before; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.Predicate; import java.util.stream.Collectors; +import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; +import static org.opensearch.cluster.routing.ShardRoutingState.RELOCATING; +import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.opensearch.cluster.routing.ShardRoutingState.UNASSIGNED; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -64,6 +69,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class RoutingTableTests extends OpenSearchAllocationTestCase { @@ -540,8 +546,47 @@ public void testAddAsRecovery() { } } - public void testAddAsRemoteStoreRestore() { - final IndexMetadata indexMetadata = createIndexMetadata(TEST_INDEX_1).state(IndexMetadata.State.OPEN).build(); + private Map<ShardId, IndexShardRoutingTable> getIndexShardRoutingTableMap(Index index, boolean allUnassigned, int numberOfReplicas) { + Map<ShardId, IndexShardRoutingTable> indexShardRoutingTableMap = new HashMap<>(); + List<ShardRoutingState> activeInitializingStates = List.of(INITIALIZING, STARTED, RELOCATING); + for (int i = 0; i < this.numberOfShards; i++) { + IndexShardRoutingTable indexShardRoutingTable = mock(IndexShardRoutingTable.class); + ShardRouting primaryShardRouting = mock(ShardRouting.class); + Boolean primaryUnassigned = allUnassigned || randomBoolean(); + when(primaryShardRouting.unassigned()).thenReturn(primaryUnassigned); + if (primaryUnassigned) { + when(primaryShardRouting.state()).thenReturn(UNASSIGNED); + } else { + when(primaryShardRouting.state()).thenReturn( + activeInitializingStates.get(randomIntBetween(0, activeInitializingStates.size() - 1)) + ); + } + when(indexShardRoutingTable.primaryShard()).thenReturn(primaryShardRouting); + List<ShardRouting> replicaShards = new ArrayList<>(); + for (int j = 0; j < numberOfReplicas; j++) { + ShardRouting replicaShardRouting = mock(ShardRouting.class); + Boolean replicaUnassigned = allUnassigned || randomBoolean(); + when(replicaShardRouting.unassigned()).thenReturn(replicaUnassigned); + if (replicaUnassigned) { + when(replicaShardRouting.state()).thenReturn(UNASSIGNED); + } else { + when(replicaShardRouting.state()).thenReturn( + activeInitializingStates.get(randomIntBetween(0, activeInitializingStates.size() - 1)) + ); + } + replicaShards.add(replicaShardRouting); + } + when(indexShardRoutingTable.replicaShards()).thenReturn(replicaShards); + indexShardRoutingTableMap.put(new ShardId(index, i), indexShardRoutingTable); + } + return indexShardRoutingTableMap; + }
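The restore tests that follow recompute their expected UNASSIGNED count from the mocks built above: an unassigned primary drags its whole replica group with it, while an assigned primary contributes only its individually unassigned replicas. The same arithmetic, extracted as a standalone sketch:

final class UnassignedCountSketch {
    static int expectedUnassigned(boolean primaryUnassigned, boolean[] replicaUnassigned) {
        if (primaryUnassigned) {
            return replicaUnassigned.length + 1; // primary plus every replica
        }
        int count = 0;
        for (boolean unassigned : replicaUnassigned) {
            if (unassigned) {
                count++;
            }
        }
        return count;
    }
}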
remoteStoreRecoverySource = new RemoteStoreRecoverySource( "restore_uuid", Version.CURRENT, @@ -550,34 +595,78 @@ public void testAddAsRemoteStoreRestore() { final RoutingTable routingTable = new RoutingTable.Builder().addAsRemoteStoreRestore( indexMetadata, remoteStoreRecoverySource, - new HashMap<>() + getIndexShardRoutingTableMap(indexMetadata.getIndex(), true, numberOfReplicas), + false ).build(); assertTrue(routingTable.hasIndex(TEST_INDEX_1)); - assertEquals(this.numberOfShards, routingTable.allShards(TEST_INDEX_1).size()); - assertEquals(this.numberOfShards, routingTable.index(TEST_INDEX_1).shardsWithState(UNASSIGNED).size()); + int numberOfShards = this.numberOfShards * (numberOfReplicas + 1); + assertEquals(numberOfShards, routingTable.allShards(TEST_INDEX_1).size()); + assertEquals(numberOfShards, routingTable.index(TEST_INDEX_1).shardsWithState(UNASSIGNED).size()); } public void testAddAsRemoteStoreRestoreWithActiveShards() { - final IndexMetadata indexMetadata = createIndexMetadata(TEST_INDEX_1).state(IndexMetadata.State.OPEN).build(); + int numberOfReplicas = randomIntBetween(0, 5); + final IndexMetadata indexMetadata = createIndexMetadata(TEST_INDEX_1).state(IndexMetadata.State.OPEN) + .numberOfReplicas(numberOfReplicas) + .build(); final RemoteStoreRecoverySource remoteStoreRecoverySource = new RemoteStoreRecoverySource( "restore_uuid", Version.CURRENT, new IndexId(TEST_INDEX_1, "1") ); - Map activeInitializingShards = new HashMap<>(); - for (int i = 0; i < randomIntBetween(1, this.numberOfShards); i++) { - activeInitializingShards.put(new ShardId(indexMetadata.getIndex(), i), mock(ShardRouting.class)); - } + Map indexShardRoutingTableMap = getIndexShardRoutingTableMap( + indexMetadata.getIndex(), + false, + numberOfReplicas + ); final RoutingTable routingTable = new RoutingTable.Builder().addAsRemoteStoreRestore( indexMetadata, remoteStoreRecoverySource, - activeInitializingShards + indexShardRoutingTableMap, + false ).build(); assertTrue(routingTable.hasIndex(TEST_INDEX_1)); - assertEquals(this.numberOfShards, routingTable.allShards(TEST_INDEX_1).size()); - assertEquals( - this.numberOfShards - activeInitializingShards.size(), - routingTable.index(TEST_INDEX_1).shardsWithState(UNASSIGNED).size() + int numberOfShards = this.numberOfShards * (numberOfReplicas + 1); + assertEquals(numberOfShards, routingTable.allShards(TEST_INDEX_1).size()); + int unassignedShards = 0; + for (IndexShardRoutingTable indexShardRoutingTable : indexShardRoutingTableMap.values()) { + if (indexShardRoutingTable.primaryShard().unassigned()) { + unassignedShards += indexShardRoutingTable.replicaShards().size() + 1; + } else { + for (ShardRouting replicaShardRouting : indexShardRoutingTable.replicaShards()) { + if (replicaShardRouting.unassigned()) { + unassignedShards += 1; + } + } + } + } + assertEquals(unassignedShards, routingTable.index(TEST_INDEX_1).shardsWithState(UNASSIGNED).size()); + } + + public void testAddAsRemoteStoreRestoreShardMismatch() { + int numberOfReplicas = randomIntBetween(0, 5); + final IndexMetadata indexMetadata = createIndexMetadata(TEST_INDEX_1).state(IndexMetadata.State.OPEN) + .numberOfReplicas(numberOfReplicas) + .build(); + final RemoteStoreRecoverySource remoteStoreRecoverySource = new RemoteStoreRecoverySource( + "restore_uuid", + Version.CURRENT, + new IndexId(TEST_INDEX_1, "1") + ); + Map indexShardRoutingTableMap = getIndexShardRoutingTableMap( + indexMetadata.getIndex(), + true, + numberOfReplicas + ); + 
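The mismatch test around this point removes one ShardId entry from the routing map and expects IllegalStateException from the builder. A hedged sketch of the kind of completeness check this implies — simplified stand-in types, not the actual RoutingTable.Builder code:

import java.util.Map;

// Illustrative stand-in for ShardId.
record ShardKey(String index, int shardId) {}

class RestoreValidation {
    // Throws if the supplied routing map does not cover every shard of the index.
    static void ensureAllShardsPresent(String index, int numberOfShards, Map<ShardKey, Object> routingMap) {
        for (int i = 0; i < numberOfShards; i++) {
            if (routingMap.containsKey(new ShardKey(index, i)) == false) {
                throw new IllegalStateException("shard [" + index + "][" + i + "] missing from restore routing map");
            }
        }
    }

    public static void main(String[] args) {
        // Covers shard 0 only; shard 1 is missing, so the check throws.
        Map<ShardKey, Object> partial = Map.of(new ShardKey("test", 0), new Object());
        try {
            ensureAllShardsPresent("test", 2, partial);
        } catch (IllegalStateException e) {
            System.out.println("caught: " + e.getMessage());
        }
    }
}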
indexShardRoutingTableMap.remove(indexShardRoutingTableMap.keySet().iterator().next()); + assertThrows( + IllegalStateException.class, + () -> new RoutingTable.Builder().addAsRemoteStoreRestore( + indexMetadata, + remoteStoreRecoverySource, + indexShardRoutingTableMap, + false + ).build() ); } diff --git a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java index b1ba52204c47a..5c0bdc8547f8b 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java @@ -31,6 +31,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; @@ -94,8 +95,8 @@ public void setUpService() { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> clusterService.state().nodes().get("nodes1"), null, - Collections.emptySet() - + Collections.emptySet(), + NoopTracer.INSTANCE ); Settings.Builder settingsBuilder = Settings.builder() diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationPriorityTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationPriorityTests.java index f9f181402da1b..8cd664c8c13fc 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationPriorityTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AllocationPriorityTests.java @@ -50,7 +50,7 @@ public class AllocationPriorityTests extends OpenSearchAllocationTestCase { /** * Tests that higher prioritized primaries and replicas are allocated first even on the balanced shard allocator - * See https://github.com/elastic/elasticsearch/issues/13249 for details + * See elasticsearch issue #13249 for details */ public void testPrioritizedIndicesAllocatedFirst() { AllocationService allocation = createAllocationService( diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java index f87bc8cb425f2..62dd14e69c402 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -247,13 +247,13 @@ public void testPrimaryBalanceWithPreferPrimaryBalanceSetting() { /** * This test verifies the allocation logic when nodes breach multiple constraints and ensure node breaching min * constraints chosen for allocation. - * + *
<p>
            * This test mimics a cluster state containing four nodes, where one node breaches two constraints while one breaches * only one. In order to have nodes breach constraints, test excludes two nodes (node2, node3) from allocation so * that other two nodes (node0, node1) have all shards assignments resulting in constraints breach. Test asserts that * the new primary shard assignment lands on the node breaching one constraint(node1), while replica land on the other * (node0). Final shard allocation state. - * + *
<p>
            routing_nodes: -----node_id[node2][V] -----node_id[node3][V] @@ -385,13 +385,13 @@ public void testGlobalPrimaryBalance() throws Exception { * This test mimics a cluster state which can not be rebalanced due to * {@link org.opensearch.cluster.routing.allocation.decider.SameShardAllocationDecider} * allocation decider which prevents shard relocation, leaving cluster unbalanced on primaries. - * + *
<p>
            * There are two nodes (N1, N2) where all primaries land on N1 while replicas on N2. * N1 N2 * ------ -------- * P1 R1 * P2 R2 - * + *
<p>
            * -----node_id[node_0][V] * --------[test][1], node[node_0], [P], s[STARTED], a[id=xqfZSToVSQaff2xvuxh_yA] * --------[test][0], node[node_0], [P], s[STARTED], a[id=VGjOeBGdSmu3pJR6T7v29A] @@ -455,14 +455,14 @@ public void testPrimaryBalance_NotSolved_1() { * This test mimics cluster state where re-balancing is not possible due to existing limitation of re-balancing * logic which applies at index level i.e. balance shards single index across all nodes. This will be solved when * primary shard count across indices, constraint is added. - * + *
<p>
            * Please note, P1, P2 belongs to different index - * + *
<p>
            * N1 N2 * ------ -------- * P1 R1 * P2 R2 - * + *
<p>
            * -----node_id[node_0][V] * --------[test1][0], node[node_0], [P], s[STARTED], a[id=u7qtyy5AR42hgEa-JpeArg] * --------[test0][0], node[node_0], [P], s[STARTED], a[id=BQrLSo6sQyGlcLdVvGgqLQ] diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/CatAllocationTestCase.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/CatAllocationTestCase.java index 10271cad33fec..8f90882c21804 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/CatAllocationTestCase.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/CatAllocationTestCase.java @@ -66,7 +66,7 @@ * A base testcase that allows to run tests based on the output of the CAT API * The input is a line based cat/shards output like: * kibana-int 0 p STARTED 2 24.8kb 10.202.245.2 r5-9-35 - * + *
<p>
            * the test builds up a clusterstate from the cat input and optionally runs a full balance on it. * This can be used to debug cluster allocation decisions. */ diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java index 8bbd2f95e4008..02188ba116c46 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -53,6 +53,7 @@ import org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.opensearch.common.settings.Settings; import org.opensearch.indices.cluster.ClusterStateChanges; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -69,6 +70,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.hamcrest.Matchers.equalTo; @@ -137,7 +139,15 @@ public void testSimpleFailedNodeTest() { } } + public void testRandomClusterPromotesOldestReplica() throws InterruptedException { + testRandomClusterPromotesReplica(true); + } + public void testRandomClusterPromotesNewestReplica() throws InterruptedException { + testRandomClusterPromotesReplica(false); + } + + void testRandomClusterPromotesReplica(boolean isSegmentReplicationEnabled) throws InterruptedException { ThreadPool threadPool = new TestThreadPool(getClass().getName()); ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool); @@ -164,6 +174,9 @@ public void testRandomClusterPromotesNewestReplica() throws InterruptedException Settings.Builder settingsBuilder = Settings.builder() .put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 4)) .put(SETTING_NUMBER_OF_REPLICAS, randomIntBetween(2, 4)); + if (isSegmentReplicationEnabled) { + settingsBuilder.put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); + } CreateIndexRequest request = new CreateIndexRequest(name, settingsBuilder.build()).waitForActiveShards(ActiveShardCount.NONE); state = cluster.createIndex(state, request); assertTrue(state.metadata().hasIndex(name)); @@ -206,13 +219,23 @@ public void testRandomClusterPromotesNewestReplica() throws InterruptedException Version candidateVer = getNodeVersion(sr, compareState); if (candidateVer != null) { logger.info("--> candidate on {} node; shard routing: {}", candidateVer, sr); - assertTrue( - "candidate was not on the newest version, new primary is on " - + newPrimaryVersion - + " and there is a candidate on " - + candidateVer, - candidateVer.onOrBefore(newPrimaryVersion) - ); + if (isSegmentReplicationEnabled) { + assertTrue( + "candidate was not on the oldest version, new primary is on " + + newPrimaryVersion + + " and there is a candidate on " + + candidateVer, + candidateVer.onOrAfter(newPrimaryVersion) + ); + } else { + assertTrue( + "candidate was not on the newest version, new primary is on " + + newPrimaryVersion + + " and there is a candidate on " + + 
candidateVer, + candidateVer.onOrBefore(newPrimaryVersion) + ); + } } }); }); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedShardsRoutingTests.java index f2dc745ad33bf..db4cedbbbe7b5 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -49,6 +49,7 @@ import org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.VersionUtils; import java.util.ArrayList; @@ -647,10 +648,21 @@ public void testFailAllReplicasInitializingOnPrimaryFailWhileHavingAReplicaToEle } public void testReplicaOnNewestVersionIsPromoted() { + testReplicaIsPromoted(false); + } + + public void testReplicaOnOldestVersionIsPromoted() { + testReplicaIsPromoted(true); + } + + private void testReplicaIsPromoted(boolean isSegmentReplicationEnabled) { AllocationService allocation = createAllocationService(Settings.builder().build()); + Settings.Builder settingsBuilder = isSegmentReplicationEnabled + ? settings(Version.CURRENT).put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + : settings(Version.CURRENT); Metadata metadata = Metadata.builder() - .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(3)) + .put(IndexMetadata.builder("test").settings(settingsBuilder).numberOfShards(1).numberOfReplicas(3)) .build(); RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); @@ -714,7 +726,12 @@ public void testReplicaOnNewestVersionIsPromoted() { assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(0)); - ShardRouting startedReplica = clusterState.getRoutingNodes().activeReplicaWithHighestVersion(shardId); + ShardRouting startedReplica; + if (isSegmentReplicationEnabled) { + startedReplica = clusterState.getRoutingNodes().activeReplicaWithOldestVersion(shardId); + } else { + startedReplica = clusterState.getRoutingNodes().activeReplicaWithHighestVersion(shardId); + } logger.info("--> all shards allocated, replica that should be promoted: {}", startedReplica); // fail the primary shard again and make sure the correct replica is promoted @@ -739,13 +756,24 @@ public void testReplicaOnNewestVersionIsPromoted() { continue; } Version nodeVer = cursor.getVersion(); - assertTrue( - "expected node [" + cursor.getId() + "] with version " + nodeVer + " to be before " + replicaNodeVersion, - replicaNodeVersion.onOrAfter(nodeVer) - ); + if (isSegmentReplicationEnabled) { + assertTrue( + "expected node [" + cursor.getId() + "] with version " + nodeVer + " to be after " + replicaNodeVersion, + replicaNodeVersion.onOrBefore(nodeVer) + ); + } else { + assertTrue( + "expected node [" + cursor.getId() + "] with version " + nodeVer + " to be before " + replicaNodeVersion, + replicaNodeVersion.onOrAfter(nodeVer) + ); + } } - startedReplica = clusterState.getRoutingNodes().activeReplicaWithHighestVersion(shardId); + if (isSegmentReplicationEnabled) { + startedReplica = 
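The promotion tests above encode two opposite rules: with document replication the replica on the newest node version is promoted, while with segment replication the replica on the oldest node version is preferred. The usual rationale is that segment-replication replicas copy segment files from the primary, and a node on an older version cannot read segments written by a newer one, so promoting the oldest-version replica keeps every remaining replica on a version at or above the new primary. A minimal sketch of the two selection rules, with illustrative types rather than RoutingNodes internals:

import java.util.Comparator;
import java.util.List;
import java.util.Optional;

// Illustrative descriptor: a replica plus the version of the node hosting it.
record Replica(String id, int nodeVersion) {}

class PromotionChoice {
    // Document replication: promote the replica on the highest node version.
    static Optional<Replica> newest(List<Replica> active) {
        return active.stream().max(Comparator.comparingInt(Replica::nodeVersion));
    }

    // Segment replication: promote the replica on the lowest node version, so
    // all remaining replicas can still consume segments from the new primary.
    static Optional<Replica> oldest(List<Replica> active) {
        return active.stream().min(Comparator.comparingInt(Replica::nodeVersion));
    }

    public static void main(String[] args) {
        List<Replica> replicas = List.of(new Replica("r1", 2), new Replica("r2", 3), new Replica("r3", 1));
        System.out.println(newest(replicas).orElseThrow()); // Replica[id=r2, nodeVersion=3]
        System.out.println(oldest(replicas).orElseThrow()); // Replica[id=r3, nodeVersion=1]
    }
}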
clusterState.getRoutingNodes().activeReplicaWithOldestVersion(shardId); + } else { + startedReplica = clusterState.getRoutingNodes().activeReplicaWithHighestVersion(shardId); + } logger.info("--> failing primary shard a second time, should select: {}", startedReplica); // fail the primary shard again, and ensure the same thing happens @@ -771,10 +799,17 @@ public void testReplicaOnNewestVersionIsPromoted() { continue; } Version nodeVer = cursor.getVersion(); - assertTrue( - "expected node [" + cursor.getId() + "] with version " + nodeVer + " to be before " + replicaNodeVersion, - replicaNodeVersion.onOrAfter(nodeVer) - ); + if (isSegmentReplicationEnabled) { + assertTrue( + "expected node [" + cursor.getId() + "] with version " + nodeVer + " to be after " + replicaNodeVersion, + replicaNodeVersion.onOrBefore(nodeVer) + ); + } else { + assertTrue( + "expected node [" + cursor.getId() + "] with version " + nodeVer + " to be before " + replicaNodeVersion, + replicaNodeVersion.onOrAfter(nodeVer) + ); + } } } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java index cf32d2b3cf00f..7f2f048485318 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardConstraintDeciderOverlapTests.java @@ -30,7 +30,7 @@ public class IndexShardConstraintDeciderOverlapTests extends OpenSearchAllocatio /** * High watermark breach blocks new shard allocations to affected nodes. If shard count on such * nodes is low, this will cause IndexShardPerNodeConstraint to breach. - * + *
<p>
            * This test verifies that this doesn't lead to unassigned shards, and there are no hot spots in eligible * nodes. */ diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardHotSpotTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardHotSpotTests.java index 2efbb256e36bc..617c9b4701722 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardHotSpotTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/IndexShardHotSpotTests.java @@ -58,7 +58,7 @@ public void testUnderReplicatedClusterScaleOut() { /** * Test cluster scale in scenario, when nodes are gracefully excluded from * cluster before termination. - * + *
<p>
            * During moveShards(), shards are picked from across indexes in an interleaved manner. * This prevents hot spots by evenly picking up shards. Since shard order can change * in subsequent runs. diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java index ef9ae90e18bb5..e1c0a7eff1f6e 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java @@ -21,7 +21,7 @@ public class RemoteShardsRebalanceShardsTests extends RemoteShardsBalancerBaseTe /** * Test remote shard allocation and balancing for standard new cluster setup. - * + *
<p>
            * Post rebalance primaries should be balanced across all the nodes. */ public void testShardAllocationAndRebalance() { @@ -72,7 +72,7 @@ private int getTotalShardCountAcrossNodes(final Map nodePrimari /** * Asserts that the expected value is within the variance range. - * + *
<p>
            * Being used to assert the average number of shards per node. * Variance is required in case of non-absolute mean values; * for example, total number of remote capable nodes in a cluster. diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java index 9cdbe04e0a0e4..85f6c129944fa 100644 --- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java @@ -487,6 +487,9 @@ public void onFailure(String source, Exception e) { } }); assertBusy(mockAppender::assertAllExpectationsMatched); + // verify stats values after state is published + assertEquals(1, clusterManagerService.getClusterStateStats().getUpdateSuccess()); + assertEquals(0, clusterManagerService.getClusterStateStats().getUpdateFailed()); } } } diff --git a/server/src/test/java/org/opensearch/common/RoundingTests.java b/server/src/test/java/org/opensearch/common/RoundingTests.java index e0c44e3516e7b..cc71ee08abcca 100644 --- a/server/src/test/java/org/opensearch/common/RoundingTests.java +++ b/server/src/test/java/org/opensearch/common/RoundingTests.java @@ -33,7 +33,6 @@ package org.opensearch.common; import org.opensearch.common.collect.Tuple; -import org.opensearch.common.rounding.DateTimeUnit; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateFormatters; import org.opensearch.common.unit.TimeValue; @@ -236,10 +235,10 @@ public void testOffsetRounding() { /** * Randomized test on TimeUnitRounding. Test uses random - * {@link DateTimeUnit} and {@link ZoneId} and often (50% of the time) + * {@link org.opensearch.common.Rounding.DateTimeUnit} and {@link ZoneId} and often (50% of the time) * chooses test dates that are exactly on or close to offset changes (e.g. * DST) in the chosen time zone. - * + *
<p>
            * It rounds the test date down and up and performs various checks on the * rounding unit interval that is defined by this. Assumptions tested are * described in @@ -1143,6 +1142,28 @@ public void testNonMillisecondsBasedUnitCalendarRoundingSize() { assertThat(prepared.roundingSize(thirdQuarter, Rounding.DateTimeUnit.HOUR_OF_DAY), closeTo(2208.0, 0.000001)); } + public void testArrayRoundingImplementations() { + int length = randomIntBetween(1, 256); + long[] values = new long[length]; + for (int i = 1; i < values.length; i++) { + values[i] = values[i - 1] + (randomNonNegativeLong() % 100); + } + + Rounding.Prepared binarySearchImpl = new Rounding.BinarySearchArrayRounding(values, length, null); + Rounding.Prepared linearSearchImpl = new Rounding.BidirectionalLinearSearchArrayRounding(values, length, null); + + for (int i = 0; i < 100000; i++) { + long key = values[0] + (randomNonNegativeLong() % (100 + values[length - 1] - values[0])); + assertEquals(binarySearchImpl.round(key), linearSearchImpl.round(key)); + } + + AssertionError exception = expectThrows(AssertionError.class, () -> { binarySearchImpl.round(values[0] - 1); }); + assertEquals("utcMillis must be after " + values[0], exception.getMessage()); + + exception = expectThrows(AssertionError.class, () -> { linearSearchImpl.round(values[0] - 1); }); + assertEquals("utcMillis must be after " + values[0], exception.getMessage()); + } + private void assertInterval(long rounded, long nextRoundingValue, Rounding rounding, int minutes, ZoneId tz) { assertInterval(rounded, dateBetween(rounded, nextRoundingValue), nextRoundingValue, rounding, tz); long millisPerMinute = 60_000; diff --git a/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java b/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java new file mode 100644 index 0000000000000..1780819390052 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java @@ -0,0 +1,125 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
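testArrayRoundingImplementations above duels two floor-lookup strategies over a sorted array of rounding boundaries and checks they agree on every key. A self-contained sketch of the two approaches under test — simplified to a single-direction scan, whereas the production BidirectionalLinearSearchArrayRounding searches from both ends (run with -ea for the asserts):

import java.util.Arrays;

class ArrayRoundingSketch {
    // Floor lookup via binary search: the greatest boundary <= key.
    static long roundBinary(long[] values, int max, long key) {
        assert key >= values[0] : "utcMillis must be after " + values[0];
        int idx = Arrays.binarySearch(values, 0, max, key);
        // binarySearch returns -(insertionPoint) - 1 on a miss; the floor sits
        // one slot below the insertion point.
        return idx >= 0 ? values[idx] : values[-2 - idx];
    }

    // Floor lookup via linear scan; cheap when keys cluster near the array ends.
    static long roundLinear(long[] values, int max, long key) {
        assert key >= values[0] : "utcMillis must be after " + values[0];
        int i = max - 1;
        while (values[i] > key) {
            i--;
        }
        return values[i];
    }

    public static void main(String[] args) {
        long[] boundaries = { 0, 100, 250, 400 };
        for (long key : new long[] { 0, 99, 250, 399, 1000 }) {
            assert roundBinary(boundaries, boundaries.length, key) == roundLinear(boundaries, boundaries.length, key);
        }
        System.out.println(roundBinary(boundaries, boundaries.length, 399)); // 250
    }
}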
+ */ + +package org.opensearch.common.blobstore; + +import org.opensearch.common.Randomness; +import org.opensearch.common.blobstore.stream.read.ReadContext; +import org.opensearch.common.blobstore.stream.read.listener.ListenerTestUtils; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.DecryptedRangedStreamProvider; +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.core.action.ActionListener; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.function.UnaryOperator; + +import org.mockito.Mockito; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class AsyncMultiStreamEncryptedBlobContainerTests extends OpenSearchTestCase { + + // Tests the happy path scenario for decrypting a read context + @SuppressWarnings("unchecked") + public void testReadBlobAsync() throws Exception { + String testBlobName = "testBlobName"; + int size = 100; + + // Mock objects needed for the test + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + CryptoHandler cryptoHandler = mock(CryptoHandler.class); + Object cryptoContext = mock(Object.class); + when(cryptoHandler.loadEncryptionMetadata(any())).thenReturn(cryptoContext); + when(cryptoHandler.estimateDecryptedLength(any(), anyLong())).thenReturn((long) size); + long[] adjustedRanges = { 0, size - 1 }; + DecryptedRangedStreamProvider rangedStreamProvider = new DecryptedRangedStreamProvider(adjustedRanges, UnaryOperator.identity()); + when(cryptoHandler.createDecryptingStreamOfRange(eq(cryptoContext), anyLong(), anyLong())).thenReturn(rangedStreamProvider); + + // Objects needed for API call + final byte[] data = new byte[size]; + Randomness.get().nextBytes(data); + + final InputStreamContainer inputStreamContainer = new InputStreamContainer(new ByteArrayInputStream(data), data.length, 0); + final ListenerTestUtils.CountingCompletionListener completionListener = + new ListenerTestUtils.CountingCompletionListener<>(); + final CompletableFuture streamContainerFuture = CompletableFuture.completedFuture(inputStreamContainer); + final ReadContext readContext = new ReadContext(size, List.of(() -> streamContainerFuture), null); + + Mockito.doAnswer(invocation -> { + ActionListener readContextActionListener = invocation.getArgument(1); + readContextActionListener.onResponse(readContext); + return null; + }).when(blobContainer).readBlobAsync(eq(testBlobName), any()); + + AsyncMultiStreamEncryptedBlobContainer asyncMultiStreamEncryptedBlobContainer = + new AsyncMultiStreamEncryptedBlobContainer<>(blobContainer, cryptoHandler); + asyncMultiStreamEncryptedBlobContainer.readBlobAsync(testBlobName, completionListener); + + // Assert results + ReadContext response = completionListener.getResponse(); + assertEquals(0, completionListener.getFailureCount()); + assertEquals(1, completionListener.getResponseCount()); + assertNull(completionListener.getException()); + + assertTrue(response instanceof AsyncMultiStreamEncryptedBlobContainer.DecryptedReadContext); + assertEquals(1, response.getNumberOfParts()); + assertEquals(size, response.getBlobSize()); + + InputStreamContainer responseContainer = 
response.getPartStreams().get(0).get().join(); + assertEquals(0, responseContainer.getOffset()); + assertEquals(size, responseContainer.getContentLength()); + assertEquals(100, responseContainer.getInputStream().available()); + } + + // Tests the exception scenario for decrypting a read context + @SuppressWarnings("unchecked") + public void testReadBlobAsyncException() throws Exception { + String testBlobName = "testBlobName"; + int size = 100; + + // Mock objects needed for the test + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + CryptoHandler cryptoHandler = mock(CryptoHandler.class); + when(cryptoHandler.loadEncryptionMetadata(any())).thenThrow(new IOException()); + + // Objects needed for API call + final byte[] data = new byte[size]; + Randomness.get().nextBytes(data); + final InputStreamContainer inputStreamContainer = new InputStreamContainer(new ByteArrayInputStream(data), data.length, 0); + final ListenerTestUtils.CountingCompletionListener completionListener = + new ListenerTestUtils.CountingCompletionListener<>(); + final CompletableFuture streamContainerFuture = CompletableFuture.completedFuture(inputStreamContainer); + final ReadContext readContext = new ReadContext(size, List.of(() -> streamContainerFuture), null); + + Mockito.doAnswer(invocation -> { + ActionListener readContextActionListener = invocation.getArgument(1); + readContextActionListener.onResponse(readContext); + return null; + }).when(blobContainer).readBlobAsync(eq(testBlobName), any()); + + AsyncMultiStreamEncryptedBlobContainer asyncMultiStreamEncryptedBlobContainer = + new AsyncMultiStreamEncryptedBlobContainer<>(blobContainer, cryptoHandler); + asyncMultiStreamEncryptedBlobContainer.readBlobAsync(testBlobName, completionListener); + + // Assert results + assertEquals(1, completionListener.getFailureCount()); + assertEquals(0, completionListener.getResponseCount()); + assertNull(completionListener.getResponse()); + assertTrue(completionListener.getException() instanceof IOException); + } + +} diff --git a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriterTests.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriterTests.java new file mode 100644 index 0000000000000..f2a758b9bbe10 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/FilePartWriterTests.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
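The two tests above pin down the wrap-and-delegate behavior of the encrypted container: on success the inner ReadContext is decorated (blob size mapped through estimateDecryptedLength, part streams wrapped with a decrypting transform), and an exception thrown while loading encryption metadata must surface through onFailure rather than escape. A hedged sketch of that pattern with illustrative listener types, not the production class:

import java.util.function.Function;

// Minimal listener pair, standing in for ActionListener<T>.
interface Listener<T> {
    void onResponse(T value);
    void onFailure(Exception e);
}

// Decorates a delegate's async result; any failure during decoration is
// converted into onFailure so callers see a single failure path.
class DecoratingListener<T> implements Listener<T> {
    private final Listener<T> delegate;
    private final Function<T, T> decorator;

    DecoratingListener(Listener<T> delegate, Function<T, T> decorator) {
        this.delegate = delegate;
        this.decorator = decorator;
    }

    @Override
    public void onResponse(T value) {
        try {
            // e.g. wrap part streams with decrypting streams, adjust the size
            delegate.onResponse(decorator.apply(value));
        } catch (Exception e) {
            // metadata-load / decryption setup failures land here, matching
            // the IOException expectation in testReadBlobAsyncException
            delegate.onFailure(e);
        }
    }

    @Override
    public void onFailure(Exception e) {
        delegate.onFailure(e);
    }
}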
+ */ + +package org.opensearch.common.blobstore.stream.read.listener; + +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.UUID; +import java.util.function.UnaryOperator; + +public class FilePartWriterTests extends OpenSearchTestCase { + + private Path path; + + @Before + public void init() throws Exception { + path = createTempDir("FilePartWriterTests"); + } + + public void testFilePartWriter() throws Exception { + Path segmentFilePath = path.resolve(UUID.randomUUID().toString()); + int contentLength = 100; + InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(contentLength)); + InputStreamContainer inputStreamContainer = new InputStreamContainer(inputStream, inputStream.available(), 0); + + FilePartWriter.write(segmentFilePath, inputStreamContainer, UnaryOperator.identity()); + + assertTrue(Files.exists(segmentFilePath)); + assertEquals(contentLength, Files.size(segmentFilePath)); + } + + public void testFilePartWriterWithOffset() throws Exception { + Path segmentFilePath = path.resolve(UUID.randomUUID().toString()); + int contentLength = 100; + int offset = 10; + InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(contentLength)); + InputStreamContainer inputStreamContainer = new InputStreamContainer(inputStream, inputStream.available(), offset); + + FilePartWriter.write(segmentFilePath, inputStreamContainer, UnaryOperator.identity()); + + assertTrue(Files.exists(segmentFilePath)); + assertEquals(contentLength + offset, Files.size(segmentFilePath)); + } + + public void testFilePartWriterLargeInput() throws Exception { + Path segmentFilePath = path.resolve(UUID.randomUUID().toString()); + int contentLength = 20 * 1024 * 1024; + InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(contentLength)); + InputStreamContainer inputStreamContainer = new InputStreamContainer(inputStream, contentLength, 0); + + FilePartWriter.write(segmentFilePath, inputStreamContainer, UnaryOperator.identity()); + + assertTrue(Files.exists(segmentFilePath)); + assertEquals(contentLength, Files.size(segmentFilePath)); + } +} diff --git a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ListenerTestUtils.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ListenerTestUtils.java new file mode 100644 index 0000000000000..a3a32f6db2148 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ListenerTestUtils.java @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore.stream.read.listener; + +import org.opensearch.core.action.ActionListener; + +/** + * Utility class containing common functionality for read listener based tests + */ +public class ListenerTestUtils { + + /** + * CountingCompletionListener acts as a verification instance for wrapping listener based calls. + * Keeps track of the last response, failure and count of response and failure invocations. 
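testFilePartWriterWithOffset above expects the resulting file to span offset + contentLength bytes, i.e. each part is positioned at its own offset within the target file. A minimal sketch of offset-positioned writing with java.nio — a hypothetical helper, not the actual FilePartWriter:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

class PartWriterSketch {
    // Writes the stream's bytes starting at `offset` within the target file.
    static void writePart(Path target, InputStream in, long offset) throws IOException {
        try (FileChannel channel = FileChannel.open(target, StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            byte[] buffer = new byte[8192];
            long position = offset;
            int read;
            while ((read = in.read(buffer)) != -1) {
                ByteBuffer bb = ByteBuffer.wrap(buffer, 0, read);
                while (bb.hasRemaining()) {
                    position += channel.write(bb, position); // positional write, no seek state
                }
            }
        }
    }

    public static void main(String[] args) throws IOException {
        Path tmp = Files.createTempFile("part", ".bin");
        writePart(tmp, new ByteArrayInputStream(new byte[100]), 10);
        System.out.println(Files.size(tmp)); // 110: a 10-byte hole plus 100 payload bytes
        Files.delete(tmp);
    }
}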
+ */ + public static class CountingCompletionListener implements ActionListener { + private int responseCount; + private int failureCount; + private T response; + private Exception exception; + + @Override + public void onResponse(T response) { + this.response = response; + responseCount++; + } + + @Override + public void onFailure(Exception e) { + exception = e; + failureCount++; + } + + public int getResponseCount() { + return responseCount; + } + + public int getFailureCount() { + return failureCount; + } + + public T getResponse() { + return response; + } + + public Exception getException() { + return exception; + } + } +} diff --git a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java new file mode 100644 index 0000000000000..0163c2275e7f4 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java @@ -0,0 +1,229 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore.stream.read.listener; + +import org.apache.lucene.tests.util.LuceneTestCase.SuppressFileSystems; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.common.blobstore.stream.read.ReadContext; +import org.opensearch.common.io.InputStreamContainer; +import org.opensearch.core.action.ActionListener; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.function.UnaryOperator; + +import static org.opensearch.common.blobstore.stream.read.listener.ListenerTestUtils.CountingCompletionListener; + +/* + WindowsFS tries to simulate file handles in a best case simulation. + The deletion for the open file on an actual Windows system will be performed as soon as the last handle + is closed, which this simulation does not account for. Preventing use of WindowsFS for these tests. 
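The ReadContextListener tests below assert that parts are first written to a temporary location (getTmpFileLocation) and only promoted to the final path once every part completes, with the temp file removed on both success and failure. A hedged sketch of that download-then-rename pattern, simplified to sequential appends where the listener under test writes parts concurrently at their offsets:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;

class TmpFileMoveSketch {
    // Appends every part to "<name>.tmp", then renames over the destination on
    // success (replacing any pre-existing file, as the alreadyExists test
    // expects); the partial file is removed on failure so no torn download remains.
    static void downloadTo(Path destination, byte[][] parts) throws IOException {
        Path tmp = destination.resolveSibling(destination.getFileName() + ".tmp");
        try {
            for (byte[] part : parts) {
                Files.write(tmp, part, StandardOpenOption.CREATE, StandardOpenOption.APPEND);
            }
            Files.move(tmp, destination, StandardCopyOption.REPLACE_EXISTING);
        } catch (IOException e) {
            Files.deleteIfExists(tmp);
            throw e;
        }
    }
}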
+ */ +@SuppressFileSystems("WindowsFS") +public class ReadContextListenerTests extends OpenSearchTestCase { + + private Path path; + private static ThreadPool threadPool; + private static final int NUMBER_OF_PARTS = 5; + private static final int PART_SIZE = 10; + private static final String TEST_SEGMENT_FILE = "test_segment_file"; + private static final int MAX_CONCURRENT_STREAMS = 10; + + @BeforeClass + public static void setup() { + threadPool = new TestThreadPool(ReadContextListenerTests.class.getName()); + } + + @AfterClass + public static void cleanup() { + threadPool.shutdown(); + } + + @Before + public void init() throws Exception { + path = createTempDir("ReadContextListenerTests"); + } + + public void testReadContextListener() throws InterruptedException, IOException { + Path fileLocation = path.resolve(UUID.randomUUID().toString()); + List blobPartStreams = initializeBlobPartStreams(); + CountDownLatch countDownLatch = new CountDownLatch(1); + ActionListener completionListener = new LatchedActionListener<>(new PlainActionFuture<>(), countDownLatch); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + completionListener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); + ReadContext readContext = new ReadContext((long) PART_SIZE * NUMBER_OF_PARTS, blobPartStreams, null); + readContextListener.onResponse(readContext); + + countDownLatch.await(); + + assertTrue(Files.exists(fileLocation)); + assertEquals(NUMBER_OF_PARTS * PART_SIZE, Files.size(fileLocation)); + } + + public void testReadContextListenerFailure() throws Exception { + Path fileLocation = path.resolve(UUID.randomUUID().toString()); + List blobPartStreams = initializeBlobPartStreams(); + CountDownLatch countDownLatch = new CountDownLatch(1); + ActionListener completionListener = new LatchedActionListener<>(new PlainActionFuture<>(), countDownLatch); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + completionListener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); + InputStream badInputStream = new InputStream() { + + @Override + public int read(byte[] b, int off, int len) throws IOException { + return read(); + } + + @Override + public int read() throws IOException { + throw new IOException(); + } + + @Override + public int available() { + return PART_SIZE; + } + }; + + blobPartStreams.add( + NUMBER_OF_PARTS, + () -> CompletableFuture.supplyAsync( + () -> new InputStreamContainer(badInputStream, PART_SIZE, PART_SIZE * NUMBER_OF_PARTS), + threadPool.generic() + ) + ); + ReadContext readContext = new ReadContext((long) (PART_SIZE + 1) * NUMBER_OF_PARTS, blobPartStreams, null); + readContextListener.onResponse(readContext); + + countDownLatch.await(); + assertFalse(Files.exists(fileLocation)); + assertFalse(Files.exists(readContextListener.getTmpFileLocation())); + } + + public void testReadContextListenerException() { + Path fileLocation = path.resolve(UUID.randomUUID().toString()); + CountingCompletionListener listener = new CountingCompletionListener(); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + listener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); + IOException exception = new IOException(); + readContextListener.onFailure(exception); + assertEquals(1, listener.getFailureCount()); + assertEquals(exception, listener.getException()); + } + + public void 
testWriteToTempFile() throws Exception { + final String fileName = UUID.randomUUID().toString(); + Path fileLocation = path.resolve(fileName); + List blobPartStreams = initializeBlobPartStreams(); + CountDownLatch countDownLatch = new CountDownLatch(1); + ActionListener completionListener = new LatchedActionListener<>(new PlainActionFuture<>(), countDownLatch); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + completionListener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); + ByteArrayInputStream assertingStream = new ByteArrayInputStream(randomByteArrayOfLength(PART_SIZE)) { + @Override + public int read(byte[] b) throws IOException { + assertTrue("parts written to temp file location", Files.exists(readContextListener.getTmpFileLocation())); + return super.read(b); + } + }; + blobPartStreams.add( + NUMBER_OF_PARTS, + () -> CompletableFuture.supplyAsync( + () -> new InputStreamContainer(assertingStream, PART_SIZE, PART_SIZE * NUMBER_OF_PARTS), + threadPool.generic() + ) + ); + ReadContext readContext = new ReadContext((long) (PART_SIZE + 1) * NUMBER_OF_PARTS + 1, blobPartStreams, null); + readContextListener.onResponse(readContext); + + countDownLatch.await(); + assertTrue(Files.exists(fileLocation)); + assertFalse(Files.exists(readContextListener.getTmpFileLocation())); + } + + public void testWriteToTempFile_alreadyExists_replacesFile() throws Exception { + final String fileName = UUID.randomUUID().toString(); + Path fileLocation = path.resolve(fileName); + // create an empty file at location. + Files.createFile(fileLocation); + assertEquals(0, Files.readAllBytes(fileLocation).length); + List blobPartStreams = initializeBlobPartStreams(); + CountDownLatch countDownLatch = new CountDownLatch(1); + ActionListener completionListener = new LatchedActionListener<>(new PlainActionFuture<>(), countDownLatch); + ReadContextListener readContextListener = new ReadContextListener( + TEST_SEGMENT_FILE, + fileLocation, + completionListener, + threadPool, + UnaryOperator.identity(), + MAX_CONCURRENT_STREAMS + ); + ReadContext readContext = new ReadContext((long) (PART_SIZE + 1) * NUMBER_OF_PARTS, blobPartStreams, null); + readContextListener.onResponse(readContext); + + countDownLatch.await(); + assertTrue(Files.exists(fileLocation)); + assertEquals(50, Files.readAllBytes(fileLocation).length); + assertFalse(Files.exists(readContextListener.getTmpFileLocation())); + } + + private List initializeBlobPartStreams() { + List blobPartStreams = new ArrayList<>(); + for (int partNumber = 0; partNumber < NUMBER_OF_PARTS; partNumber++) { + InputStream testStream = new ByteArrayInputStream(randomByteArrayOfLength(PART_SIZE)); + int finalPartNumber = partNumber; + blobPartStreams.add( + () -> CompletableFuture.supplyAsync( + () -> new InputStreamContainer(testStream, PART_SIZE, (long) finalPartNumber * PART_SIZE), + threadPool.generic() + ) + ); + } + return blobPartStreams; + } +} diff --git a/server/src/test/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStreamTests.java b/server/src/test/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStreamTests.java new file mode 100644 index 0000000000000..fc2eba4c35e2a --- /dev/null +++ b/server/src/test/java/org/opensearch/common/blobstore/transfer/stream/RateLimitingOffsetRangeInputStreamTests.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made 
to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore.transfer.stream; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.NIOFSDirectory; +import org.apache.lucene.store.RateLimiter; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; + +public class RateLimitingOffsetRangeInputStreamTests extends ResettableCheckedInputStreamBaseTest { + + private Directory directory; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + directory = new NIOFSDirectory(testFile.getParent()); + } + + @Override + protected OffsetRangeInputStream getOffsetRangeInputStream(long size, long position) throws IOException { + return new RateLimitingOffsetRangeInputStream( + new OffsetRangeIndexInputStream(directory.openInput(testFile.getFileName().toString(), IOContext.DEFAULT), size, position), + () -> new RateLimiter.SimpleRateLimiter(randomIntBetween(10, 20)), + (t) -> {} + ); + } + + @Override + @After + public void tearDown() throws Exception { + directory.close(); + super.tearDown(); + } +} diff --git a/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java b/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java index 55ce961f90380..73e48d42b0270 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java @@ -1169,9 +1169,9 @@ public void testParseOGCPolygonWithHoles() throws IOException, ParseException { } public void testParseInvalidPolygon() throws IOException { - /** - * The following 3 test cases ensure proper error handling of invalid polygons - * per the GeoJSON specification + /* + The following 3 test cases ensure proper error handling of invalid polygons + per the GeoJSON specification */ // test case 1: create an invalid polygon with only 2 points String invalidPoly = XContentFactory.jsonBuilder() diff --git a/server/src/test/java/org/opensearch/common/geo/GeometryIndexerTests.java b/server/src/test/java/org/opensearch/common/geo/GeometryIndexerTests.java index 297b7d9f65d5f..7fc95c2316aef 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeometryIndexerTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeometryIndexerTests.java @@ -225,7 +225,7 @@ public static MultiPoint remove180s(MultiPoint points) { /** * A randomized test that generates a random lines crossing anti-merdian and checks that the decomposed segments of this line * have the same total length (measured using Euclidean distances between neighboring points) as the original line. - * + *
<p>
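RateLimitingOffsetRangeInputStreamTests above re-runs the resettable-stream base suite with a Lucene SimpleRateLimiter wrapped around the range stream. A minimal sketch of throttling reads by pausing on a limiter — an illustrative wrapper with its own Limiter interface, not the production class or Lucene's exact API:

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

// Illustrative throttled stream: after every `checkEvery` bytes it asks the
// limiter how long to pause, modeled on Lucene's RateLimiter contract.
class ThrottledInputStream extends FilterInputStream {
    interface Limiter {
        long pause(long bytes); // blocks as needed; returns time slept
    }

    private final Limiter limiter;
    private final long checkEvery;
    private long sinceLastPause;

    ThrottledInputStream(InputStream in, Limiter limiter, long checkEvery) {
        super(in);
        this.limiter = limiter;
        this.checkEvery = checkEvery;
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
        int n = super.read(b, off, len);
        if (n > 0) {
            sinceLastPause += n;
            if (sinceLastPause >= checkEvery) {
                limiter.pause(sinceLastPause); // wait until the byte budget allows more reads
                sinceLastPause = 0;
            }
        }
        return n;
    }
}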
            * It also extracts all points from these lines, performs normalization of these points and then compares that the resulting * points of line normalization match the points of points normalization with the exception of points that were created on the * antimeridian as the result of line decomposition. diff --git a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java index 94ddfd7e7f100..201b4bccc4974 100644 --- a/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/opensearch/common/joda/JavaJodaTimeDuellingTests.java @@ -61,8 +61,8 @@ protected boolean enableWarningsCheck() { } public void testTimezoneParsing() { - /** this testcase won't work in joda. See comment in {@link #testPartialTimeParsing()} - * assertSameDateAs("2016-11-30T+01", "strict_date_optional_time", "strict_date_optional_time"); + /* this testcase won't work in joda. See comment in {@link #testPartialTimeParsing()} + assertSameDateAs("2016-11-30T+01", "strict_date_optional_time", "strict_date_optional_time"); */ assertSameDateAs("2016-11-30T00+01", "strict_date_optional_time", "strict_date_optional_time"); assertSameDateAs("2016-11-30T00+0100", "strict_date_optional_time", "strict_date_optional_time"); diff --git a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java index 2e3c98db6fa81..0ca118fe422a5 100644 --- a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java @@ -47,6 +47,8 @@ import org.opensearch.http.HttpStats; import org.opensearch.http.NullDispatcher; import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -116,7 +118,8 @@ public Map> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap("custom", custom); } @@ -143,7 +146,8 @@ public Map> getHttpTransports( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher requestDispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { return Collections.singletonMap("custom", custom); } @@ -173,7 +177,8 @@ public Map> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap("default_custom", customTransport); } @@ -188,7 +193,8 @@ public Map> getHttpTransports( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher requestDispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { Map> supplierMap = new HashMap<>(); supplierMap.put("custom", custom); @@ -216,7 +222,8 @@ public Map> getTransports( PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, 
NamedWriteableRegistry namedWriteableRegistry, - NetworkService networkService + NetworkService networkService, + Tracer tracer ) { return Collections.singletonMap("default_custom", customTransport); } @@ -231,7 +238,8 @@ public Map> getHttpTransports( NamedXContentRegistry xContentRegistry, NetworkService networkService, HttpServerTransport.Dispatcher requestDispatcher, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + Tracer tracer ) { Map> supplierMap = new HashMap<>(); supplierMap.put("custom", custom); @@ -313,7 +321,8 @@ private NetworkModule newNetworkModule(Settings settings, NetworkPlugin... plugi xContentRegistry(), null, new NullDispatcher(), - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ); } } diff --git a/server/src/test/java/org/opensearch/common/rounding/DateTimeUnitTests.java b/server/src/test/java/org/opensearch/common/rounding/DateTimeUnitTests.java deleted file mode 100644 index 7b87e136c5f38..0000000000000 --- a/server/src/test/java/org/opensearch/common/rounding/DateTimeUnitTests.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
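The NetworkModuleTests churn above reflects an interface change: the NetworkPlugin transport factory methods gained a trailing telemetry Tracer parameter, and tests now pass NoopTracer.INSTANCE. Schematically, with stand-in types for shape only rather than the real org.opensearch interfaces:

import java.util.Collections;
import java.util.Map;
import java.util.function.Supplier;

// Stand-ins for the real types, for signature shape only.
interface Tracer {}
interface Transport {}

interface NetworkPluginShape {
    // The factory method now receives the tracer as its final argument;
    // implementations that do not emit traces can simply ignore it.
    default Map<String, Supplier<Transport>> getTransports(/* ...existing args..., */ Tracer tracer) {
        return Collections.emptyMap();
    }
}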
- */ - -package org.opensearch.common.rounding; - -import org.opensearch.test.OpenSearchTestCase; - -import static org.opensearch.common.rounding.DateTimeUnit.DAY_OF_MONTH; -import static org.opensearch.common.rounding.DateTimeUnit.HOUR_OF_DAY; -import static org.opensearch.common.rounding.DateTimeUnit.MINUTES_OF_HOUR; -import static org.opensearch.common.rounding.DateTimeUnit.MONTH_OF_YEAR; -import static org.opensearch.common.rounding.DateTimeUnit.QUARTER; -import static org.opensearch.common.rounding.DateTimeUnit.SECOND_OF_MINUTE; -import static org.opensearch.common.rounding.DateTimeUnit.WEEK_OF_WEEKYEAR; -import static org.opensearch.common.rounding.DateTimeUnit.YEAR_OF_CENTURY; - -public class DateTimeUnitTests extends OpenSearchTestCase { - - /** - * test that we don't accidentally change enum ids - */ - public void testEnumIds() { - assertEquals(1, WEEK_OF_WEEKYEAR.id()); - assertEquals(WEEK_OF_WEEKYEAR, DateTimeUnit.resolve((byte) 1)); - - assertEquals(2, YEAR_OF_CENTURY.id()); - assertEquals(YEAR_OF_CENTURY, DateTimeUnit.resolve((byte) 2)); - - assertEquals(3, QUARTER.id()); - assertEquals(QUARTER, DateTimeUnit.resolve((byte) 3)); - - assertEquals(4, MONTH_OF_YEAR.id()); - assertEquals(MONTH_OF_YEAR, DateTimeUnit.resolve((byte) 4)); - - assertEquals(5, DAY_OF_MONTH.id()); - assertEquals(DAY_OF_MONTH, DateTimeUnit.resolve((byte) 5)); - - assertEquals(6, HOUR_OF_DAY.id()); - assertEquals(HOUR_OF_DAY, DateTimeUnit.resolve((byte) 6)); - - assertEquals(7, MINUTES_OF_HOUR.id()); - assertEquals(MINUTES_OF_HOUR, DateTimeUnit.resolve((byte) 7)); - - assertEquals(8, SECOND_OF_MINUTE.id()); - assertEquals(SECOND_OF_MINUTE, DateTimeUnit.resolve((byte) 8)); - } -} diff --git a/server/src/test/java/org/opensearch/common/rounding/RoundingDuelTests.java b/server/src/test/java/org/opensearch/common/rounding/RoundingDuelTests.java deleted file mode 100644 index 3088067cd1f84..0000000000000 --- a/server/src/test/java/org/opensearch/common/rounding/RoundingDuelTests.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.common.rounding; - -import org.opensearch.common.unit.TimeValue; -import org.opensearch.test.OpenSearchTestCase; -import org.joda.time.DateTimeZone; - -import java.time.ZoneOffset; - -import static org.hamcrest.Matchers.is; - -public class RoundingDuelTests extends OpenSearchTestCase { - - // dont include nano/micro seconds as rounding would become zero then and throw an exception - private static final String[] ALLOWED_TIME_SUFFIXES = new String[] { "d", "h", "ms", "s", "m" }; - - public void testDuellingImplementations() { - org.opensearch.common.Rounding.DateTimeUnit randomDateTimeUnit = randomFrom(org.opensearch.common.Rounding.DateTimeUnit.values()); - org.opensearch.common.Rounding.Prepared rounding; - Rounding roundingJoda; - - if (randomBoolean()) { - rounding = org.opensearch.common.Rounding.builder(randomDateTimeUnit).timeZone(ZoneOffset.UTC).build().prepareForUnknown(); - DateTimeUnit dateTimeUnit = DateTimeUnit.resolve(randomDateTimeUnit.getId()); - roundingJoda = Rounding.builder(dateTimeUnit).timeZone(DateTimeZone.UTC).build(); - } else { - TimeValue interval = timeValue(); - rounding = org.opensearch.common.Rounding.builder(interval).timeZone(ZoneOffset.UTC).build().prepareForUnknown(); - roundingJoda = Rounding.builder(interval).timeZone(DateTimeZone.UTC).build(); - } - - long roundValue = randomLong(); - assertThat(roundingJoda.round(roundValue), is(rounding.round(roundValue))); - } - - static TimeValue timeValue() { - return TimeValue.parseTimeValue(randomIntBetween(1, 1000) + randomFrom(ALLOWED_TIME_SUFFIXES), "settingName"); - } -} diff --git a/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java b/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java deleted file mode 100644 index 8297f8fcf47e2..0000000000000 --- a/server/src/test/java/org/opensearch/common/rounding/TimeZoneRoundingTests.java +++ /dev/null @@ -1,822 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.common.rounding; - -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.rounding.Rounding.TimeIntervalRounding; -import org.opensearch.common.rounding.Rounding.TimeUnitRounding; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.test.OpenSearchTestCase; -import org.joda.time.DateTime; -import org.joda.time.DateTimeConstants; -import org.joda.time.DateTimeZone; -import org.joda.time.format.DateTimeFormat; -import org.joda.time.format.DateTimeFormatter; -import org.joda.time.format.ISODateTimeFormat; -import org.hamcrest.Description; -import org.hamcrest.Matcher; -import org.hamcrest.TypeSafeMatcher; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.TimeUnit; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.startsWith; - -public class TimeZoneRoundingTests extends OpenSearchTestCase { - - public void testUTCTimeUnitRounding() { - Rounding tzRounding = Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).build(); - DateTimeZone tz = DateTimeZone.UTC; - assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-01T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-01T00:00:00.000Z")), isDate(time("2009-03-01T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-09T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-16T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.QUARTER).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-01T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-04-01T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T01:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-09T01:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-10T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-01T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2013-01-01T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.MINUTES_OF_HOUR).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T01:01:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-09T00:01:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.SECOND_OF_MINUTE).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-10T01:01:01.000Z"), tz)); - 
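[Illustrative aside, not part of the original change: every assertion in this deleted test follows the same pattern, round(t) floors t to the start of its calendar unit and nextRoundingValue(r) returns the start of the following unit. A minimal java.time sketch of the MONTH_OF_YEAR case, under the assumption that UTC unit rounding is all that is needed (class and method names here are hypothetical, this is not the removed Joda-based implementation):

import java.time.Instant;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.temporal.ChronoUnit;
import java.time.temporal.TemporalAdjusters;

final class MonthRoundingSketch {
    // Floor an epoch-millis timestamp to the first instant of its month (UTC).
    static long round(long utcMillis) {
        return ZonedDateTime.ofInstant(Instant.ofEpochMilli(utcMillis), ZoneOffset.UTC)
            .with(TemporalAdjusters.firstDayOfMonth())
            .truncatedTo(ChronoUnit.DAYS)
            .toInstant()
            .toEpochMilli();
    }

    // The next rounding value of an already-rounded timestamp is the start of the next month.
    static long nextRoundingValue(long roundedUtcMillis) {
        return ZonedDateTime.ofInstant(Instant.ofEpochMilli(roundedUtcMillis), ZoneOffset.UTC)
            .plusMonths(1)
            .toInstant()
            .toEpochMilli();
    }

    public static void main(String[] args) {
        long t = Instant.parse("2009-02-03T01:01:01Z").toEpochMilli();
        // Mirrors the assertions in the deleted test above.
        assert round(t) == Instant.parse("2009-02-01T00:00:00Z").toEpochMilli();
        assert nextRoundingValue(round(t)) == Instant.parse("2009-03-01T00:00:00Z").toEpochMilli();
    }
}
]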
assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-09T00:00:01.000Z"), tz)); - } - - public void testUTCIntervalRounding() { - Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(12)).build(); - DateTimeZone tz = DateTimeZone.UTC; - assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-03T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T00:00:00.000Z")), isDate(time("2009-02-03T12:00:00.000Z"), tz)); - assertThat(tzRounding.round(time("2009-02-03T13:01:01")), isDate(time("2009-02-03T12:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T12:00:00.000Z")), isDate(time("2009-02-04T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(TimeValue.timeValueHours(48)).build(); - assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-03T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T00:00:00.000Z")), isDate(time("2009-02-05T00:00:00.000Z"), tz)); - assertThat(tzRounding.round(time("2009-02-05T13:01:01")), isDate(time("2009-02-05T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-05T00:00:00.000Z")), isDate(time("2009-02-07T00:00:00.000Z"), tz)); - } - - /** - * test TimeIntervalRounding, (interval < 12h) with time zone shift - */ - public void testTimeIntervalRounding() { - DateTimeZone tz = DateTimeZone.forOffsetHours(-1); - Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(6)).timeZone(tz).build(); - assertThat(tzRounding.round(time("2009-02-03T00:01:01")), isDate(time("2009-02-02T19:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-02T19:00:00.000Z")), isDate(time("2009-02-03T01:00:00.000Z"), tz)); - - assertThat(tzRounding.round(time("2009-02-03T13:01:01")), isDate(time("2009-02-03T13:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T13:00:00.000Z")), isDate(time("2009-02-03T19:00:00.000Z"), tz)); - } - - /** - * test DayIntervalRounding, (interval >= 12h) with time zone shift - */ - public void testDayIntervalRounding() { - DateTimeZone tz = DateTimeZone.forOffsetHours(-8); - Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(12)).timeZone(tz).build(); - assertThat(tzRounding.round(time("2009-02-03T00:01:01")), isDate(time("2009-02-02T20:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-02T20:00:00.000Z")), isDate(time("2009-02-03T08:00:00.000Z"), tz)); - - assertThat(tzRounding.round(time("2009-02-03T13:01:01")), isDate(time("2009-02-03T08:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T08:00:00.000Z")), isDate(time("2009-02-03T20:00:00.000Z"), tz)); - } - - public void testDayRounding() { - int timezoneOffset = -2; - Rounding tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(DateTimeZone.forOffsetHours(timezoneOffset)).build(); - assertThat(tzRounding.round(0), equalTo(0L - TimeValue.timeValueHours(24 + timezoneOffset).millis())); - assertThat( - tzRounding.nextRoundingValue(0L - TimeValue.timeValueHours(24 + timezoneOffset).millis()), - equalTo(TimeValue.timeValueHours(-timezoneOffset).millis()) - ); - - DateTimeZone tz = DateTimeZone.forID("-08:00"); - tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); - assertThat(tzRounding.round(time("2012-04-01T04:15:30Z")), isDate(time("2012-03-31T08:00:00Z"), tz)); - - tzRounding = 
Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build(); - assertThat(tzRounding.round(time("2012-04-01T04:15:30Z")), equalTo(time("2012-03-01T08:00:00Z"))); - - // date in Feb-3rd, but still in Feb-2nd in -02:00 timezone - tz = DateTimeZone.forID("-02:00"); - tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); - assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-02T02:00:00"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-02T02:00:00")), isDate(time("2009-02-03T02:00:00"), tz)); - - // date in Feb-3rd, also in -02:00 timezone - tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); - assertThat(tzRounding.round(time("2009-02-03T02:01:01")), isDate(time("2009-02-03T02:00:00"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T02:00:00")), isDate(time("2009-02-04T02:00:00"), tz)); - } - - public void testTimeRounding() { - // hour unit - DateTimeZone tz = DateTimeZone.forOffsetHours(-2); - Rounding tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build(); - assertThat(tzRounding.round(0), equalTo(0L)); - assertThat(tzRounding.nextRoundingValue(0L), equalTo(TimeValue.timeValueHours(1L).getMillis())); - - assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-03T01:00:00"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2009-02-03T01:00:00")), isDate(time("2009-02-03T02:00:00"), tz)); - } - - public void testTimeUnitRoundingDST() { - Rounding tzRounding; - // testing savings to non savings switch - DateTimeZone cet = DateTimeZone.forID("CET"); - tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build(); - assertThat(tzRounding.round(time("2014-10-26T01:01:01", cet)), isDate(time("2014-10-26T01:00:00+02:00"), cet)); - assertThat(tzRounding.nextRoundingValue(time("2014-10-26T01:00:00", cet)), isDate(time("2014-10-26T02:00:00+02:00"), cet)); - assertThat(tzRounding.nextRoundingValue(time("2014-10-26T02:00:00", cet)), isDate(time("2014-10-26T02:00:00+01:00"), cet)); - - // testing non savings to savings switch - tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build(); - assertThat(tzRounding.round(time("2014-03-30T01:01:01", cet)), isDate(time("2014-03-30T01:00:00+01:00"), cet)); - assertThat(tzRounding.nextRoundingValue(time("2014-03-30T01:00:00", cet)), isDate(time("2014-03-30T03:00:00", cet), cet)); - assertThat(tzRounding.nextRoundingValue(time("2014-03-30T03:00:00", cet)), isDate(time("2014-03-30T04:00:00", cet), cet)); - - // testing non savings to savings switch (America/Chicago) - DateTimeZone chg = DateTimeZone.forID("America/Chicago"); - Rounding tzRounding_utc = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.UTC).build(); - assertThat(tzRounding.round(time("2014-03-09T03:01:01", chg)), isDate(time("2014-03-09T03:00:00", chg), chg)); - - Rounding tzRounding_chg = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(chg).build(); - assertThat(tzRounding_chg.round(time("2014-03-09T03:01:01", chg)), isDate(time("2014-03-09T03:00:00", chg), chg)); - - // testing savings to non savings switch 2013 (America/Chicago) - assertThat(tzRounding_utc.round(time("2013-11-03T06:01:01", chg)), isDate(time("2013-11-03T06:00:00", chg), chg)); - assertThat(tzRounding_chg.round(time("2013-11-03T06:01:01", chg)), isDate(time("2013-11-03T06:00:00", chg), chg)); - - // testing savings to non savings switch 2014 (America/Chicago) - 
assertThat(tzRounding_utc.round(time("2014-11-02T06:01:01", chg)), isDate(time("2014-11-02T06:00:00", chg), chg)); - assertThat(tzRounding_chg.round(time("2014-11-02T06:01:01", chg)), isDate(time("2014-11-02T06:00:00", chg), chg)); - } - - /** - * Randomized test on TimeUnitRounding. Test uses random - * {@link DateTimeUnit} and {@link DateTimeZone} and often (50% of the time) - * chooses test dates that are exactly on or close to offset changes (e.g. - * DST) in the chosen time zone. - * - * It rounds the test date down and up and performs various checks on the - * rounding unit interval that is defined by this. Assumptions tested are - * described in - * {@link #assertInterval(long, long, long, Rounding, DateTimeZone)} - */ - public void testRoundingRandom() { - for (int i = 0; i < 1000; ++i) { - DateTimeUnit timeUnit = randomTimeUnit(); - DateTimeZone tz = randomDateTimeZone(); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - long date = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00 - long unitMillis = timeUnit.field(tz).getDurationField().getUnitMillis(); - if (randomBoolean()) { - nastyDate(date, tz, unitMillis); - } - final long roundedDate = rounding.round(date); - final long nextRoundingValue = rounding.nextRoundingValue(roundedDate); - - assertInterval(roundedDate, date, nextRoundingValue, rounding, tz); - - // check correct unit interval width for units smaller than a day, they should be fixed size except for transitions - if (unitMillis <= DateTimeConstants.MILLIS_PER_DAY) { - // if the interval defined didn't cross timezone offset transition, it should cover unitMillis width - if (tz.getOffset(roundedDate - 1) == tz.getOffset(nextRoundingValue + 1)) { - assertThat( - "unit interval width not as expected for [" + timeUnit + "], [" + tz + "] at " + new DateTime(roundedDate), - nextRoundingValue - roundedDate, - equalTo(unitMillis) - ); - } - } - } - } - - /** - * To be even more nasty, go to a transition in the selected time zone. 
- * In one third of the cases stay there, otherwise go half a unit back or forth - */ - private static long nastyDate(long initialDate, DateTimeZone timezone, long unitMillis) { - long date = timezone.nextTransition(initialDate); - if (randomBoolean()) { - return date + (randomLong() % unitMillis); // positive and negative offset possible - } else { - return date; - } - } - - /** - * test DST end with interval rounding - * CET: 25 October 2015, 03:00:00 clocks were turned backward 1 hour to 25 October 2015, 02:00:00 local standard time - */ - public void testTimeIntervalCET_DST_End() { - long interval = TimeUnit.MINUTES.toMillis(20); - DateTimeZone tz = DateTimeZone.forID("CET"); - Rounding rounding = new TimeIntervalRounding(interval, tz); - - assertThat(rounding.round(time("2015-10-25T01:55:00+02:00")), isDate(time("2015-10-25T01:40:00+02:00"), tz)); - assertThat(rounding.round(time("2015-10-25T02:15:00+02:00")), isDate(time("2015-10-25T02:00:00+02:00"), tz)); - assertThat(rounding.round(time("2015-10-25T02:35:00+02:00")), isDate(time("2015-10-25T02:20:00+02:00"), tz)); - assertThat(rounding.round(time("2015-10-25T02:55:00+02:00")), isDate(time("2015-10-25T02:40:00+02:00"), tz)); - // after DST shift - assertThat(rounding.round(time("2015-10-25T02:15:00+01:00")), isDate(time("2015-10-25T02:00:00+01:00"), tz)); - assertThat(rounding.round(time("2015-10-25T02:35:00+01:00")), isDate(time("2015-10-25T02:20:00+01:00"), tz)); - assertThat(rounding.round(time("2015-10-25T02:55:00+01:00")), isDate(time("2015-10-25T02:40:00+01:00"), tz)); - assertThat(rounding.round(time("2015-10-25T03:15:00+01:00")), isDate(time("2015-10-25T03:00:00+01:00"), tz)); - } - - /** - * test DST start with interval rounding - * CET: 27 March 2016, 02:00:00 clocks were turned forward 1 hour to 27 March 2016, 03:00:00 local daylight time - */ - public void testTimeIntervalCET_DST_Start() { - long interval = TimeUnit.MINUTES.toMillis(20); - DateTimeZone tz = DateTimeZone.forID("CET"); - Rounding rounding = new TimeIntervalRounding(interval, tz); - // test DST start - assertThat(rounding.round(time("2016-03-27T01:55:00+01:00")), isDate(time("2016-03-27T01:40:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-27T02:00:00+01:00")), isDate(time("2016-03-27T03:00:00+02:00"), tz)); - assertThat(rounding.round(time("2016-03-27T03:15:00+02:00")), isDate(time("2016-03-27T03:00:00+02:00"), tz)); - assertThat(rounding.round(time("2016-03-27T03:35:00+02:00")), isDate(time("2016-03-27T03:20:00+02:00"), tz)); - } - - /** - * test DST start with offset not fitting interval, e.g. 
Asia/Kathmandu, - * where adding 15min on 1986-01-01T00:00:00 causes the interval from - * 1986-01-01T00:15:00+05:45 to 1986-01-01T00:20:00+05:45 to be only 5min - * long - */ - public void testTimeInterval_Kathmandu_DST_Start() { - long interval = TimeUnit.MINUTES.toMillis(20); - DateTimeZone tz = DateTimeZone.forID("Asia/Kathmandu"); - Rounding rounding = new TimeIntervalRounding(interval, tz); - assertThat(rounding.round(time("1985-12-31T23:55:00+05:30")), isDate(time("1985-12-31T23:40:00+05:30"), tz)); - assertThat(rounding.round(time("1986-01-01T00:16:00+05:45")), isDate(time("1986-01-01T00:15:00+05:45"), tz)); - assertThat(time("1986-01-01T00:15:00+05:45") - time("1985-12-31T23:40:00+05:30"), equalTo(TimeUnit.MINUTES.toMillis(20))); - assertThat(rounding.round(time("1986-01-01T00:26:00+05:45")), isDate(time("1986-01-01T00:20:00+05:45"), tz)); - assertThat(time("1986-01-01T00:20:00+05:45") - time("1986-01-01T00:15:00+05:45"), equalTo(TimeUnit.MINUTES.toMillis(5))); - assertThat(rounding.round(time("1986-01-01T00:46:00+05:45")), isDate(time("1986-01-01T00:40:00+05:45"), tz)); - assertThat(time("1986-01-01T00:40:00+05:45") - time("1986-01-01T00:20:00+05:45"), equalTo(TimeUnit.MINUTES.toMillis(20))); - } - - /** - * Special test for intervals that don't fit evenly into the rounding interval. - * In this case, when the interval crosses a DST transition point, rounding in local - * time can land in a DST gap, which results in wrong UTC rounding values. - */ - public void testIntervalRounding_NotDivisibleInteval() { - DateTimeZone tz = DateTimeZone.forID("CET"); - long interval = TimeUnit.MINUTES.toMillis(14); - Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); - - assertThat(rounding.round(time("2016-03-27T01:41:00+01:00")), isDate(time("2016-03-27T01:30:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-27T01:51:00+01:00")), isDate(time("2016-03-27T01:44:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-27T01:59:00+01:00")), isDate(time("2016-03-27T01:58:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-27T03:05:00+02:00")), isDate(time("2016-03-27T03:00:00+02:00"), tz)); - assertThat(rounding.round(time("2016-03-27T03:12:00+02:00")), isDate(time("2016-03-27T03:08:00+02:00"), tz)); - assertThat(rounding.round(time("2016-03-27T03:25:00+02:00")), isDate(time("2016-03-27T03:22:00+02:00"), tz)); - assertThat(rounding.round(time("2016-03-27T03:39:00+02:00")), isDate(time("2016-03-27T03:36:00+02:00"), tz)); - } - - /** - * Test for half-day rounding intervals crossing DST.
- */ - public void testIntervalRounding_HalfDay_DST() { - DateTimeZone tz = DateTimeZone.forID("CET"); - long interval = TimeUnit.HOURS.toMillis(12); - Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); - - assertThat(rounding.round(time("2016-03-26T01:00:00+01:00")), isDate(time("2016-03-26T00:00:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-26T13:00:00+01:00")), isDate(time("2016-03-26T12:00:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-27T01:00:00+01:00")), isDate(time("2016-03-27T00:00:00+01:00"), tz)); - assertThat(rounding.round(time("2016-03-27T13:00:00+02:00")), isDate(time("2016-03-27T12:00:00+02:00"), tz)); - assertThat(rounding.round(time("2016-03-28T01:00:00+02:00")), isDate(time("2016-03-28T00:00:00+02:00"), tz)); - assertThat(rounding.round(time("2016-03-28T13:00:00+02:00")), isDate(time("2016-03-28T12:00:00+02:00"), tz)); - } - - /** - * randomized test on {@link TimeIntervalRounding} with random interval and time zone offsets - */ - public void testIntervalRoundingRandom() { - for (int i = 0; i < 1000; i++) { - TimeUnit unit = randomFrom(new TimeUnit[] { TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS }); - long interval = unit.toMillis(randomIntBetween(1, 365)); - DateTimeZone tz = randomDateTimeZone(); - Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); - long mainDate = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00 - if (randomBoolean()) { - mainDate = nastyDate(mainDate, tz, interval); - } - // check two intervals around date - long previousRoundedValue = Long.MIN_VALUE; - for (long date = mainDate - 2 * interval; date < mainDate + 2 * interval; date += interval / 2) { - try { - final long roundedDate = rounding.round(date); - final long nextRoundingValue = rounding.nextRoundingValue(roundedDate); - assertThat("Rounding should be idempotent", roundedDate, equalTo(rounding.round(roundedDate))); - assertThat("Rounded value smaller or equal than unrounded", roundedDate, lessThanOrEqualTo(date)); - assertThat( - "Values smaller than rounded value should round further down", - rounding.round(roundedDate - 1), - lessThan(roundedDate) - ); - assertThat("Rounding should be >= previous rounding value", roundedDate, greaterThanOrEqualTo(previousRoundedValue)); - - if (tz.isFixed()) { - assertThat("NextRounding value should be greater than date", nextRoundingValue, greaterThan(roundedDate)); - assertThat( - "NextRounding value should be interval from rounded value", - nextRoundingValue - roundedDate, - equalTo(interval) - ); - assertThat( - "NextRounding value should be a rounded date", - nextRoundingValue, - equalTo(rounding.round(nextRoundingValue)) - ); - } - previousRoundedValue = roundedDate; - } catch (AssertionError e) { - logger.error("Rounding error at {}, timezone {}, interval: {},", new DateTime(date, tz), tz, interval); - throw e; - } - } - } - } - - /** - * Test that rounded values are always greater or equal to last rounded value if date is increasing. 
- * The example covers an interval around 2011-10-30T02:10:00+01:00, time zone CET, interval: 2700000ms - */ - public void testIntervalRoundingMonotonic_CET() { - long interval = TimeUnit.MINUTES.toMillis(45); - DateTimeZone tz = DateTimeZone.forID("CET"); - Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); - List<Tuple<String, String>> expectedDates = new ArrayList<>(); - // first date is the date to be rounded, second the expected result - expectedDates.add(new Tuple<>("2011-10-30T01:40:00.000+02:00", "2011-10-30T01:30:00.000+02:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:02:30.000+02:00", "2011-10-30T01:30:00.000+02:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:25:00.000+02:00", "2011-10-30T02:15:00.000+02:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:47:30.000+02:00", "2011-10-30T02:15:00.000+02:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:10:00.000+01:00", "2011-10-30T02:15:00.000+02:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:32:30.000+01:00", "2011-10-30T02:15:00.000+01:00")); - expectedDates.add(new Tuple<>("2011-10-30T02:55:00.000+01:00", "2011-10-30T02:15:00.000+01:00")); - expectedDates.add(new Tuple<>("2011-10-30T03:17:30.000+01:00", "2011-10-30T03:00:00.000+01:00")); - - long previousDate = Long.MIN_VALUE; - for (Tuple<String, String> dates : expectedDates) { - final long roundedDate = rounding.round(time(dates.v1())); - assertThat(roundedDate, isDate(time(dates.v2()), tz)); - assertThat(roundedDate, greaterThanOrEqualTo(previousDate)); - previousDate = roundedDate; - } - // here's what this means for interval widths - assertEquals(TimeUnit.MINUTES.toMillis(45), time("2011-10-30T02:15:00.000+02:00") - time("2011-10-30T01:30:00.000+02:00")); - assertEquals(TimeUnit.MINUTES.toMillis(60), time("2011-10-30T02:15:00.000+01:00") - time("2011-10-30T02:15:00.000+02:00")); - assertEquals(TimeUnit.MINUTES.toMillis(45), time("2011-10-30T03:00:00.000+01:00") - time("2011-10-30T02:15:00.000+01:00")); - } - - /** - * special test for DST switch from #9491 - */ - public void testAmbiguousHoursAfterDSTSwitch() { - Rounding tzRounding; - final DateTimeZone tz = DateTimeZone.forID("Asia/Jerusalem"); - tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build(); - assertThat(tzRounding.round(time("2014-10-26T00:30:00+03:00")), isDate(time("2014-10-26T00:00:00+03:00"), tz)); - assertThat(tzRounding.round(time("2014-10-26T01:30:00+03:00")), isDate(time("2014-10-26T01:00:00+03:00"), tz)); - // the utc instant for "2014-10-26T03:00:00+03:00" and "2014-10-26T02:00:00+02:00" is the same; local time turns back 1h here - assertThat(time("2014-10-26T03:00:00+03:00"), isDate(time("2014-10-26T02:00:00+02:00"), tz)); - assertThat(tzRounding.round(time("2014-10-26T01:30:00+02:00")), isDate(time("2014-10-26T01:00:00+02:00"), tz)); - assertThat(tzRounding.round(time("2014-10-26T02:30:00+02:00")), isDate(time("2014-10-26T02:00:00+02:00"), tz)); - - // Day interval - tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); - assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-11-11T00:00:00", tz), tz)); - // DST on - assertThat(tzRounding.round(time("2014-08-11T17:00:00", tz)), isDate(time("2014-08-11T00:00:00", tz), tz)); - // Day of switching DST on -> off - assertThat(tzRounding.round(time("2014-10-26T17:00:00", tz)), isDate(time("2014-10-26T00:00:00", tz), tz)); - // Day of switching DST off -> on - assertThat(tzRounding.round(time("2015-03-27T17:00:00", tz)), isDate(time("2015-03-27T00:00:00", tz), tz)); - - // Month
interval - tzRounding = Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build(); - assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-11-01T00:00:00", tz), tz)); - // DST on - assertThat(tzRounding.round(time("2014-10-10T17:00:00", tz)), isDate(time("2014-10-01T00:00:00", tz), tz)); - - // Year interval - tzRounding = Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build(); - assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-01-01T00:00:00", tz), tz)); - - // Two timestamps in same year and different timezone offset ("Double buckets" issue - #9491) - tzRounding = Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build(); - assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(tzRounding.round(time("2014-08-11T17:00:00", tz)), tz)); - } - - /** - * test for #10025, strict local to UTC conversion can cause joda exceptions - * on DST start - */ - public void testLenientConversionDST() { - DateTimeZone tz = DateTimeZone.forID("America/Sao_Paulo"); - long start = time("2014-10-18T20:50:00.000", tz); - long end = time("2014-10-19T01:00:00.000", tz); - Rounding tzRounding = new Rounding.TimeUnitRounding(DateTimeUnit.MINUTES_OF_HOUR, tz); - Rounding dayTzRounding = new Rounding.TimeIntervalRounding(60000, tz); - for (long time = start; time < end; time = time + 60000) { - assertThat(tzRounding.nextRoundingValue(time), greaterThan(time)); - assertThat(dayTzRounding.nextRoundingValue(time), greaterThan(time)); - } - } - - public void testEdgeCasesTransition() { - { - // standard +/-1 hour DST transition, CET - DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; - DateTimeZone tz = DateTimeZone.forID("CET"); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - - // 29 Mar 2015 - Daylight Saving Time Started - // at 02:00:00 clocks were turned forward 1 hour to 03:00:00 - assertInterval(time("2015-03-29T00:00:00.000+01:00"), time("2015-03-29T01:00:00.000+01:00"), rounding, 60, tz); - assertInterval(time("2015-03-29T01:00:00.000+01:00"), time("2015-03-29T03:00:00.000+02:00"), rounding, 60, tz); - assertInterval(time("2015-03-29T03:00:00.000+02:00"), time("2015-03-29T04:00:00.000+02:00"), rounding, 60, tz); - - // 25 Oct 2015 - Daylight Saving Time Ended - // at 03:00:00 clocks were turned backward 1 hour to 02:00:00 - assertInterval(time("2015-10-25T01:00:00.000+02:00"), time("2015-10-25T02:00:00.000+02:00"), rounding, 60, tz); - assertInterval(time("2015-10-25T02:00:00.000+02:00"), time("2015-10-25T02:00:00.000+01:00"), rounding, 60, tz); - assertInterval(time("2015-10-25T02:00:00.000+01:00"), time("2015-10-25T03:00:00.000+01:00"), rounding, 60, tz); - } - - { - // time zone "Asia/Kathmandu" - // 1 Jan 1986 - Time Zone Change (IST → NPT), at 00:00:00 clocks were turned forward 00:15 minutes - // - // hour rounding is stable before 1985-12-31T23:00:00.000 and after 1986-01-01T01:00:00.000+05:45 - // the interval between is 105 minutes long because the hour after transition starts at 00:15 - // which is not a round value for hourly rounding - DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; - DateTimeZone tz = DateTimeZone.forID("Asia/Kathmandu"); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - - assertInterval(time("1985-12-31T22:00:00.000+05:30"), time("1985-12-31T23:00:00.000+05:30"), rounding, 60, tz); - assertInterval(time("1985-12-31T23:00:00.000+05:30"), time("1986-01-01T01:00:00.000+05:45"), rounding, 105, tz); - 
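[Illustrative aside, not part of the deleted test: the 105-minute interval asserted just above falls out of the tz database's +05:30 to +05:45 change in Asia/Kathmandu, which java.time can probe directly. A sketch, assuming only that the local tzdata still carries the 1986 IST-to-NPT switch:

import java.time.Instant;
import java.time.ZoneId;
import java.time.zone.ZoneOffsetTransition;

final class KathmanduGapSketch {
    public static void main(String[] args) {
        ZoneId tz = ZoneId.of("Asia/Kathmandu");
        // Find the first transition after late 1985: the IST -> NPT switch.
        ZoneOffsetTransition t = tz.getRules().nextTransition(Instant.parse("1985-12-01T00:00:00Z"));
        System.out.println(t.getOffsetBefore() + " -> " + t.getOffsetAfter()); // +05:30 -> +05:45
        // A gap transition: the local times skipped by the clock change never existed,
        // which is what stretches one hourly bucket to 105 minutes.
        System.out.println(t.isGap());
    }
}
]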
assertInterval(time("1986-01-01T01:00:00.000+05:45"), time("1986-01-01T02:00:00.000+05:45"), rounding, 60, tz); - } - - { - // time zone "Australia/Lord_Howe" - // 3 Mar 1991 - Daylight Saving Time Ended - // at 02:00:00 clocks were turned backward 0:30 hours to Sunday, 3 March 1991, 01:30:00 - DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; - DateTimeZone tz = DateTimeZone.forID("Australia/Lord_Howe"); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - - assertInterval(time("1991-03-03T00:00:00.000+11:00"), time("1991-03-03T01:00:00.000+11:00"), rounding, 60, tz); - assertInterval(time("1991-03-03T01:00:00.000+11:00"), time("1991-03-03T02:00:00.000+10:30"), rounding, 90, tz); - assertInterval(time("1991-03-03T02:00:00.000+10:30"), time("1991-03-03T03:00:00.000+10:30"), rounding, 60, tz); - - // 27 Oct 1991 - Daylight Saving Time Started - // at 02:00:00 clocks were turned forward 0:30 hours to 02:30:00 - assertInterval(time("1991-10-27T00:00:00.000+10:30"), time("1991-10-27T01:00:00.000+10:30"), rounding, 60, tz); - // the interval containing the switch time is 90 minutes long - assertInterval(time("1991-10-27T01:00:00.000+10:30"), time("1991-10-27T03:00:00.000+11:00"), rounding, 90, tz); - assertInterval(time("1991-10-27T03:00:00.000+11:00"), time("1991-10-27T04:00:00.000+11:00"), rounding, 60, tz); - } - - { - // time zone "Pacific/Chatham" - // 5 Apr 2015 - Daylight Saving Time Ended - // at 03:45:00 clocks were turned backward 1 hour to 02:45:00 - DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; - DateTimeZone tz = DateTimeZone.forID("Pacific/Chatham"); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - - assertInterval(time("2015-04-05T02:00:00.000+13:45"), time("2015-04-05T03:00:00.000+13:45"), rounding, 60, tz); - assertInterval(time("2015-04-05T03:00:00.000+13:45"), time("2015-04-05T03:00:00.000+12:45"), rounding, 60, tz); - assertInterval(time("2015-04-05T03:00:00.000+12:45"), time("2015-04-05T04:00:00.000+12:45"), rounding, 60, tz); - - // 27 Sep 2015 - Daylight Saving Time Started - // at 02:45:00 clocks were turned forward 1 hour to 03:45:00 - - assertInterval(time("2015-09-27T01:00:00.000+12:45"), time("2015-09-27T02:00:00.000+12:45"), rounding, 60, tz); - assertInterval(time("2015-09-27T02:00:00.000+12:45"), time("2015-09-27T04:00:00.000+13:45"), rounding, 60, tz); - assertInterval(time("2015-09-27T04:00:00.000+13:45"), time("2015-09-27T05:00:00.000+13:45"), rounding, 60, tz); - } - } - - public void testDST_Europe_Rome() { - // time zone "Europe/Rome", rounding to days. Rome had two midnights on the day the clocks went back in 1978, and - // timeZone.convertLocalToUTC() gives the later of the two because Rome is east of UTC, whereas we want the earlier. 
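[Illustrative aside in java.time rather than the Joda API under test: java.time resolves an ambiguous local time to the earlier of its two valid offsets by default, which is exactly the behaviour the comment above says this test wants. A sketch, assuming the tzdata for Rome's 1978 DST end:

import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.util.List;

final class RomeTwoMidnightsSketch {
    public static void main(String[] args) {
        ZoneId rome = ZoneId.of("Europe/Rome");
        LocalDateTime midnight = LocalDateTime.parse("1978-10-01T00:00:00");
        // Both offsets are valid for this local time: the clocks were set back across midnight.
        List<ZoneOffset> valid = rome.getRules().getValidOffsets(midnight);
        System.out.println(valid); // [+02:00, +01:00]
        // atZone() picks the earlier offset (+02:00), i.e. the first of the two midnights.
        System.out.println(midnight.atZone(rome));
    }
}
]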
- - DateTimeUnit timeUnit = DateTimeUnit.DAY_OF_MONTH; - DateTimeZone tz = DateTimeZone.forID("Europe/Rome"); - Rounding rounding = new TimeUnitRounding(timeUnit, tz); - - { - long timeBeforeFirstMidnight = time("1978-09-30T23:59:00+02:00"); - long floor = rounding.round(timeBeforeFirstMidnight); - assertThat(floor, isDate(time("1978-09-30T00:00:00+02:00"), tz)); - } - - { - long timeBetweenMidnights = time("1978-10-01T00:30:00+02:00"); - long floor = rounding.round(timeBetweenMidnights); - assertThat(floor, isDate(time("1978-10-01T00:00:00+02:00"), tz)); - } - - { - long timeAfterSecondMidnight = time("1978-10-01T00:30:00+01:00"); - long floor = rounding.round(timeAfterSecondMidnight); - assertThat(floor, isDate(time("1978-10-01T00:00:00+02:00"), tz)); - - long prevFloor = rounding.round(floor - 1); - assertThat(prevFloor, lessThan(floor)); - assertThat(prevFloor, isDate(time("1978-09-30T00:00:00+02:00"), tz)); - } - } - - /** - * Test for a time zone whose days overlap because the clocks are set back across midnight at the end of DST. - */ - public void testDST_America_St_Johns() { - // time zone "America/St_Johns", rounding to days. - DateTimeUnit timeUnit = DateTimeUnit.DAY_OF_MONTH; - DateTimeZone tz = DateTimeZone.forID("America/St_Johns"); - Rounding rounding = new TimeUnitRounding(timeUnit, tz); - - // 29 October 2006 - Daylight Saving Time ended, changing the UTC offset from -02:30 to -03:30. - // This happened at 02:31 UTC, 00:01 local time, so the clocks were set back 1 hour to 23:01 on the 28th. - // This means that 2006-10-29 has _two_ midnights, one in the -02:30 offset and one in the -03:30 offset. - // Only the first of these is considered "rounded". Moreover, the extra time between 23:01 and 23:59 - // should be considered as part of the 28th even though it comes after midnight on the 29th. - - { - // Times before the first midnight should be rounded up to the first midnight. - long timeBeforeFirstMidnight = time("2006-10-28T23:30:00.000-02:30"); - long floor = rounding.round(timeBeforeFirstMidnight); - assertThat(floor, isDate(time("2006-10-28T00:00:00.000-02:30"), tz)); - long ceiling = rounding.nextRoundingValue(timeBeforeFirstMidnight); - assertThat(ceiling, isDate(time("2006-10-29T00:00:00.000-02:30"), tz)); - assertInterval(floor, timeBeforeFirstMidnight, ceiling, rounding, tz); - } - - { - // Times between the two midnights which are on the later day should be rounded down to the later day's midnight. - long timeBetweenMidnights = time("2006-10-29T00:00:30.000-02:30"); - // (this is halfway through the last minute before the clocks changed, in which local time was ambiguous) - - long floor = rounding.round(timeBetweenMidnights); - assertThat(floor, isDate(time("2006-10-29T00:00:00.000-02:30"), tz)); - - long ceiling = rounding.nextRoundingValue(timeBetweenMidnights); - assertThat(ceiling, isDate(time("2006-10-30T00:00:00.000-03:30"), tz)); - - assertInterval(floor, timeBetweenMidnights, ceiling, rounding, tz); - } - - { - // Times between the two midnights which are on the earlier day should be rounded down to the earlier day's midnight. 
- long timeBetweenMidnights = time("2006-10-28T23:30:00.000-03:30"); - // (this is halfway through the hour after the clocks changed, in which local time was ambiguous) - - long floor = rounding.round(timeBetweenMidnights); - assertThat(floor, isDate(time("2006-10-28T00:00:00.000-02:30"), tz)); - - long ceiling = rounding.nextRoundingValue(timeBetweenMidnights); - assertThat(ceiling, isDate(time("2006-10-29T00:00:00.000-02:30"), tz)); - - assertInterval(floor, timeBetweenMidnights, ceiling, rounding, tz); - } - - { - // Times after the second midnight should be rounded down to the first midnight. - long timeAfterSecondMidnight = time("2006-10-29T06:00:00.000-03:30"); - long floor = rounding.round(timeAfterSecondMidnight); - assertThat(floor, isDate(time("2006-10-29T00:00:00.000-02:30"), tz)); - long ceiling = rounding.nextRoundingValue(timeAfterSecondMidnight); - assertThat(ceiling, isDate(time("2006-10-30T00:00:00.000-03:30"), tz)); - assertInterval(floor, timeAfterSecondMidnight, ceiling, rounding, tz); - } - } - - /** - * tests for dst transition with overlaps and day roundings. - */ - public void testDST_END_Edgecases() { - // First case, dst happens at 1am local time, switching back one hour. - // We want the overlapping hour to count for the next day, making it a 25h interval - - DateTimeUnit timeUnit = DateTimeUnit.DAY_OF_MONTH; - DateTimeZone tz = DateTimeZone.forID("Atlantic/Azores"); - Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - - // Sunday, 29 October 2000, 01:00:00 clocks were turned backward 1 hour - // to Sunday, 29 October 2000, 00:00:00 local standard time instead - // which means there were two midnights that day. - - long midnightBeforeTransition = time("2000-10-29T00:00:00", tz); - long midnightOfTransition = time("2000-10-29T00:00:00-01:00"); - assertEquals(60L * 60L * 1000L, midnightOfTransition - midnightBeforeTransition); - long nextMidnight = time("2000-10-30T00:00:00", tz); - - assertInterval(midnightBeforeTransition, nextMidnight, rounding, 25 * 60, tz); - - assertThat(rounding.round(time("2000-10-29T06:00:00-01:00")), isDate(time("2000-10-29T00:00:00Z"), tz)); - - // Second case, dst happens at 0am local time, switching back one hour to 23pm local time. - // We want the overlapping hour to count for the previous day here - - tz = DateTimeZone.forID("America/Lima"); - rounding = new Rounding.TimeUnitRounding(timeUnit, tz); - - // Sunday, 1 April 1990, 00:00:00 clocks were turned backward 1 hour to - // Saturday, 31 March 1990, 23:00:00 local standard time instead - - midnightBeforeTransition = time("1990-03-31T00:00:00.000-04:00"); - nextMidnight = time("1990-04-01T00:00:00.000-05:00"); - assertInterval(midnightBeforeTransition, nextMidnight, rounding, 25 * 60, tz); - - // make sure the next interval is 24h long again - long midnightAfterTransition = time("1990-04-01T00:00:00.000-05:00"); - nextMidnight = time("1990-04-02T00:00:00.000-05:00"); - assertInterval(midnightAfterTransition, nextMidnight, rounding, 24 * 60, tz); - } - - /** - * Test that time zones are correctly parsed. There is a bug with - * Joda 2.9.4 (see https://github.com/JodaOrg/joda-time/issues/373) - */ - public void testsTimeZoneParsing() { - final DateTime expected = new DateTime(2016, 11, 10, 5, 37, 59, randomDateTimeZone()); - - // Formatter used to print and parse the sample date. 
- // Printing the date works but parsing it back fails - // with Joda 2.9.4 - DateTimeFormatter formatter = DateTimeFormat.forPattern("YYYY-MM-dd'T'HH:mm:ss " + randomFrom("ZZZ", "[ZZZ]", "'['ZZZ']'")); - - String dateTimeAsString = formatter.print(expected); - assertThat(dateTimeAsString, startsWith("2016-11-10T05:37:59 ")); - - DateTime parsedDateTime = formatter.parseDateTime(dateTimeAsString); - assertThat(parsedDateTime.getZone(), equalTo(expected.getZone())); - } - - private static void assertInterval(long rounded, long nextRoundingValue, Rounding rounding, int minutes, DateTimeZone tz) { - assertInterval(rounded, dateBetween(rounded, nextRoundingValue), nextRoundingValue, rounding, tz); - assertEquals(DateTimeConstants.MILLIS_PER_MINUTE * minutes, nextRoundingValue - rounded); - } - - /** - * perform a number of assertions and checks on {@link TimeUnitRounding} intervals - * @param rounded the expected low end of the rounding interval - * @param unrounded a date in the interval to be checked for rounding - * @param nextRoundingValue the expected upper end of the rounding interval - * @param rounding the rounding instance - * @param tz the time zone used when rendering dates in assertion messages - */ - private static void assertInterval(long rounded, long unrounded, long nextRoundingValue, Rounding rounding, DateTimeZone tz) { - assertThat("rounding should be idempotent ", rounding.round(rounded), isDate(rounded, tz)); - assertThat("rounded value smaller or equal than unrounded" + rounding, rounded, lessThanOrEqualTo(unrounded)); - assertThat("values less than rounded should round further down" + rounding, rounding.round(rounded - 1), lessThan(rounded)); - assertThat("nextRounding value should be a rounded date", rounding.round(nextRoundingValue), isDate(nextRoundingValue, tz)); - assertThat( - "values above nextRounding should round down there", - rounding.round(nextRoundingValue + 1), - isDate(nextRoundingValue, tz) - ); - - if (isTimeWithWellDefinedRounding(tz, unrounded)) { - assertThat("nextRounding value should be greater than date" + rounding, nextRoundingValue, greaterThan(unrounded)); - - long dateBetween = dateBetween(rounded, nextRoundingValue); - assertThat( - "dateBetween [" + new DateTime(dateBetween, tz) + "] should round down to roundedDate", - rounding.round(dateBetween), - isDate(rounded, tz) - ); - assertThat( - "dateBetween [" + new DateTime(dateBetween, tz) + "] should round up to nextRoundingValue", - rounding.nextRoundingValue(dateBetween), - isDate(nextRoundingValue, tz) - ); - } - } - - private static boolean isTimeWithWellDefinedRounding(DateTimeZone tz, long t) { - if (tz.getID().equals("America/St_Johns") - || tz.getID().equals("America/Goose_Bay") - || tz.getID().equals("America/Moncton") - || tz.getID().equals("Canada/Newfoundland")) { - - // Clocks went back at 00:01 between 1987 and 2010, causing overlapping days. - // These timezones are otherwise uninteresting, so just skip this period. - - return t <= time("1987-10-01T00:00:00Z") || t >= time("2010-12-01T00:00:00Z"); - } - - if (tz.getID().equals("Antarctica/Casey")) { - - // Clocks went back 3 hours at 02:00 on 2010-03-05, causing overlapping days.
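[Illustrative aside, not part of the deleted code: the "overlapping days" being skipped here can be confirmed in java.time by probing the zone's transitions; an overlap transition means some local times map to two instants. A sketch, assuming the tzdata entry for the 2010 Antarctica/Casey change mentioned above:

import java.time.Instant;
import java.time.ZoneId;
import java.time.zone.ZoneOffsetTransition;

final class OverlapProbeSketch {
    public static void main(String[] args) {
        ZoneId casey = ZoneId.of("Antarctica/Casey");
        ZoneOffsetTransition t = casey.getRules().nextTransition(Instant.parse("2010-03-01T00:00:00Z"));
        System.out.println(t.isOverlap()); // true: clocks went back, so local times repeat
        System.out.println(t.getOffsetBefore() + " -> " + t.getOffsetAfter());
    }
}
]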
- - return t <= time("2010-03-03T00:00:00Z") || t >= time("2010-03-07T00:00:00Z"); - } - - return true; - } - - private static long dateBetween(long lower, long upper) { - long dateBetween = randomLongBetween(lower, upper - 1); - assert lower <= dateBetween && dateBetween < upper; - return dateBetween; - } - - private static DateTimeUnit randomTimeUnit() { - byte id = (byte) randomIntBetween(1, 8); - return DateTimeUnit.resolve(id); - } - - private static long time(String time) { - return time(time, DateTimeZone.UTC); - } - - private static long time(String time, DateTimeZone zone) { - return ISODateTimeFormat.dateOptionalTimeParser().withZone(zone).parseMillis(time); - } - - private static Matcher isDate(final long expected, DateTimeZone tz) { - return new TypeSafeMatcher() { - @Override - public boolean matchesSafely(final Long item) { - return expected == item.longValue(); - } - - @Override - public void describeTo(Description description) { - description.appendText(new DateTime(expected, tz) + " [" + expected + "] "); - } - - @Override - protected void describeMismatchSafely(final Long actual, final Description mismatchDescription) { - mismatchDescription.appendText(" was ").appendValue(new DateTime(actual, tz) + " [" + actual + "]"); - } - }; - } -} diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java index df34c19dbab8e..36ae21b4936ff 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java @@ -36,7 +36,6 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; -import org.opensearch.search.SearchBootstrapSettings; import org.opensearch.search.SearchService; import org.opensearch.test.FeatureFlagSetter; import org.hamcrest.Matchers; @@ -340,32 +339,32 @@ public void testConcurrentSegmentSearchIndexSettings() { public void testMaxSliceCountClusterSettingsForConcurrentSearch() { // Test that we throw an exception without the feature flag Settings settings = Settings.builder() - .put(SearchBootstrapSettings.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), true) + .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), 2) .build(); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new SettingsModule(settings)); assertTrue( ex.getMessage() - .contains("unknown setting [" + SearchBootstrapSettings.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey()) + .contains("unknown setting [" + SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey() + "]") ); // Test that the settings updates correctly with the feature flag FeatureFlagSetter.set(FeatureFlags.CONCURRENT_SEGMENT_SEARCH); int settingValue = randomIntBetween(0, 10); Settings settingsWithFeatureFlag = Settings.builder() - .put(SearchBootstrapSettings.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), settingValue) + .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), settingValue) .build(); SettingsModule settingsModule = new SettingsModule(settingsWithFeatureFlag); assertEquals( settingValue, - (int) SearchBootstrapSettings.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.get(settingsModule.getSettings()) + (int) 
SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.get(settingsModule.getSettings()) ); // Test that negative value is not allowed settingValue = -1; final Settings settingsWithFeatureFlag_2 = Settings.builder() - .put(SearchBootstrapSettings.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), settingValue) + .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), settingValue) .build(); - ex = expectThrows(IllegalArgumentException.class, () -> new SettingsModule(settingsWithFeatureFlag_2)); - assertTrue(ex.getMessage().contains(SearchBootstrapSettings.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey())); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> new SettingsModule(settingsWithFeatureFlag_2)); + assertTrue(iae.getMessage().contains(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey())); } } diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java index d376efac1be40..dc116582e4103 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java @@ -302,6 +302,20 @@ public void testPrefixNormalization() { assertThat(settings.get("foo.test"), equalTo("test")); } + public void testPrefixNormalizationArchived() { + Settings settings = Settings.builder().put("archived.foo.bar", "baz").normalizePrefix("foo.").build(); + + assertThat(settings.size(), equalTo(1)); + assertThat(settings.get("foo.archived.foo.bar"), nullValue()); + assertThat(settings.get("archived.foo.bar"), equalTo("baz")); + + settings = Settings.builder().put("archived.foo.*", "baz").normalizePrefix("foo.").build(); + + assertThat(settings.size(), equalTo(1)); + assertThat(settings.get("foo.archived.foo.*"), nullValue()); + assertThat(settings.get("archived.foo.*"), equalTo("baz")); + } + public void testFilteredMap() { Settings.Builder builder = Settings.builder(); builder.put("a", "a1"); diff --git a/server/src/test/java/org/opensearch/common/util/BytesRefHashTests.java b/server/src/test/java/org/opensearch/common/util/BytesRefHashTests.java index a78a35e5a2412..adcec8f07f702 100644 --- a/server/src/test/java/org/opensearch/common/util/BytesRefHashTests.java +++ b/server/src/test/java/org/opensearch/common/util/BytesRefHashTests.java @@ -35,6 +35,7 @@ import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.opensearch.common.hash.T1ha1; import org.opensearch.common.settings.Settings; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.test.OpenSearchTestCase; @@ -44,6 +45,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.stream.Stream; public class BytesRefHashTests extends OpenSearchTestCase { @@ -57,9 +59,13 @@ private void newHash() { if (hash != null) { hash.close(); } - // Test high load factors to make sure that collision resolution works fine - final float maxLoadFactor = 0.6f + randomFloat() * 0.39f; - hash = new BytesRefHash(randomIntBetween(0, 100), maxLoadFactor, randomBigArrays()); + long seed = randomLong(); + hash = new BytesRefHash( + randomIntBetween(1, 100), // random capacity + 0.6f + randomFloat() * 0.39f, // random load factor to verify collision resolution + key -> T1ha1.hash(key.bytes, key.offset, 
key.length, seed), + randomBigArrays() + ); } @Override @@ -68,39 +74,34 @@ public void setUp() throws Exception { newHash(); } - public void testDuel() { - final int len = randomIntBetween(1, 100000); - final BytesRef[] values = new BytesRef[len]; - for (int i = 0; i < values.length; ++i) { - values[i] = new BytesRef(randomAlphaOfLength(5)); - } - final Map<BytesRef, Integer> valueToId = new HashMap<>(); - final BytesRef[] idToValue = new BytesRef[values.length]; - final int iters = randomInt(1000000); - for (int i = 0; i < iters; ++i) { - final BytesRef value = randomFrom(values); - if (valueToId.containsKey(value)) { - assertEquals(-1 - valueToId.get(value), hash.add(value, value.hashCode())); + public void testFuzzy() { + Map<BytesRef, Long> reference = new HashMap<>(); + BytesRef[] keys = Stream.generate(() -> new BytesRef(randomAlphaOfLength(20))) + .limit(randomIntBetween(1000, 2000)) + .toArray(BytesRef[]::new); + + // Verify the behaviour of "add" and "find". + for (int i = 0; i < keys.length * 10; i++) { + BytesRef key = keys[i % keys.length]; + if (reference.containsKey(key)) { + long expectedOrdinal = reference.get(key); + assertEquals(-1 - expectedOrdinal, hash.add(key)); + assertEquals(expectedOrdinal, hash.find(key)); } else { - assertEquals(valueToId.size(), hash.add(value, value.hashCode())); - idToValue[valueToId.size()] = value; - valueToId.put(value, valueToId.size()); + assertEquals(-1, hash.find(key)); + reference.put(key, (long) reference.size()); + assertEquals((long) reference.get(key), hash.add(key)); } } - assertEquals(valueToId.size(), hash.size()); - for (final var next : valueToId.entrySet()) { - assertEquals(next.getValue().longValue(), hash.find(next.getKey(), next.getKey().hashCode())); + // Verify the behaviour of "get". + BytesRef scratch = new BytesRef(); + for (Map.Entry<BytesRef, Long> entry : reference.entrySet()) { + assertEquals(entry.getKey(), hash.get(entry.getValue(), scratch)); } - for (long i = 0; i < hash.capacity(); ++i) { - final long id = hash.id(i); - BytesRef spare = new BytesRef(); - if (id >= 0) { - hash.get(id, spare); - assertEquals(idToValue[(int) id], spare); - } - } + // Verify the behaviour of "size". + assertEquals(reference.size(), hash.size()); hash.close(); } diff --git a/server/src/test/java/org/opensearch/core/RestStatusTests.java b/server/src/test/java/org/opensearch/core/RestStatusTests.java new file mode 100644 index 0000000000000..f8dba99aa8b60 --- /dev/null +++ b/server/src/test/java/org/opensearch/core/RestStatusTests.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.core; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.action.ShardOperationFailedException; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.PriorityQueue; + +public class RestStatusTests extends OpenSearchTestCase { + + public void testStatusReturns200ForNoFailures() { + int totalShards = randomIntBetween(1, 100); + int successfulShards = randomIntBetween(1, totalShards); + + assertEquals(RestStatus.OK, RestStatus.status(successfulShards, totalShards)); + } + + public void testStatusReturns503ForUnavailableShards() { + int totalShards = randomIntBetween(1, 100); + int successfulShards = 0; + + assertEquals(RestStatus.SERVICE_UNAVAILABLE, RestStatus.status(successfulShards, totalShards)); + } + + public void testStatusReturnsFailureStatusWhenFailuresExist() { + int totalShards = randomIntBetween(1, 100); + int successfulShards = 0; + + TestException[] failures = new TestException[totalShards]; + PriorityQueue<TestException> heapOfFailures = new PriorityQueue<>((x, y) -> y.status().compareTo(x.status())); + + for (int i = 0; i < totalShards; ++i) { + /* + * The status of each failure doesn't need to convey an actual failure + * and need not follow the REST contract. We're not testing the contract + * here, only that status() returns the greatest status code from the + * selection of failures. + */ + RestStatus status = randomFrom(RestStatus.values()); + TestException failure = new TestException(status); + + failures[i] = failure; + heapOfFailures.add(failure); + } + + assertEquals(heapOfFailures.peek().status(), RestStatus.status(successfulShards, totalShards, failures)); + } + + public void testSerialization() throws IOException { + final RestStatus status = randomFrom(RestStatus.values()); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + RestStatus.writeTo(out, status); + + try (StreamInput in = out.bytes().streamInput()) { + RestStatus deserializedStatus = RestStatus.readFrom(in); + + assertEquals(status, deserializedStatus); + } + } + } + + private static class TestException extends ShardOperationFailedException { + TestException(final RestStatus status) { + super("super-idx", randomInt(), "gone-fishing", status, new Throwable("cake")); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new IOException("not implemented"); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + throw new IOException("not implemented"); + } + } + +} diff --git a/server/src/test/java/org/opensearch/crypto/CryptoHandlerRegistryTests.java b/server/src/test/java/org/opensearch/crypto/CryptoHandlerRegistryTests.java new file mode 100644 index 0000000000000..93a7b3d3eb4b9 --- /dev/null +++ b/server/src/test/java/org/opensearch/crypto/CryptoHandlerRegistryTests.java @@ -0,0 +1,168 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.crypto; + +import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.MasterKeyProvider; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.CryptoKeyProviderPlugin; +import org.opensearch.plugins.CryptoPlugin; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import org.mockito.ArgumentMatchers; +import org.mockito.Mockito; + +public class CryptoHandlerRegistryTests extends OpenSearchTestCase { + + private TestCryptoHandlerRegistry cryptoManagerRegistry; + private String pluginTypeWithCreationFailure; + private CryptoKeyProviderPlugin cryptoPlugin1; + private CryptoKeyProviderPlugin cryptoPlugin2; + + @Before + public void setup() { + List<CryptoKeyProviderPlugin> cryptoKPPlugins = new ArrayList<>(); + CryptoKeyProviderPlugin cryptoPlugin1 = Mockito.mock(CryptoKeyProviderPlugin.class); + String pluginType1 = UUID.randomUUID().toString(); + Mockito.when(cryptoPlugin1.type()).thenReturn(pluginType1); + MasterKeyProvider masterKeyProvider1 = Mockito.mock(MasterKeyProvider.class); + Mockito.when(cryptoPlugin1.createKeyProvider(ArgumentMatchers.any())).thenReturn(masterKeyProvider1); + this.cryptoPlugin1 = cryptoPlugin1; + cryptoKPPlugins.add(cryptoPlugin1); + + CryptoKeyProviderPlugin cryptoPlugin2 = Mockito.mock(CryptoKeyProviderPlugin.class); + String pluginType2 = UUID.randomUUID().toString(); + Mockito.when(cryptoPlugin2.type()).thenReturn(pluginType2); + MasterKeyProvider masterKeyProvider2 = Mockito.mock(MasterKeyProvider.class); + Mockito.when(cryptoPlugin2.createKeyProvider(ArgumentMatchers.any())).thenReturn(masterKeyProvider2); + cryptoKPPlugins.add(cryptoPlugin2); + this.cryptoPlugin2 = cryptoPlugin2; + + CryptoKeyProviderPlugin cryptoPluginCreationFailure = Mockito.mock(CryptoKeyProviderPlugin.class); + pluginTypeWithCreationFailure = UUID.randomUUID().toString(); + Mockito.when(cryptoPluginCreationFailure.type()).thenReturn(pluginTypeWithCreationFailure); + Mockito.when(cryptoPluginCreationFailure.createKeyProvider(ArgumentMatchers.any())) + .thenThrow(new RuntimeException("Injected failure")); + cryptoKPPlugins.add(cryptoPluginCreationFailure); + + cryptoManagerRegistry = new TestCryptoHandlerRegistry(new TestCryptoPlugin(), cryptoKPPlugins, Settings.EMPTY); + } + + static class TestCryptoPlugin implements CryptoPlugin { + + @Override + public CryptoHandler getOrCreateCryptoHandler( + MasterKeyProvider keyProvider, + String keyProviderName, + String keyProviderType, + Runnable onClose + ) { + return Mockito.mock(CryptoHandler.class); + } + } + + static class TestCryptoHandlerRegistry extends CryptoHandlerRegistry { + + protected TestCryptoHandlerRegistry(CryptoPlugin cryptoPlugin, List<CryptoKeyProviderPlugin> cryptoPlugins, Settings settings) { + super(List.of(cryptoPlugin), cryptoPlugins, settings); + } + + @Override + public Map<String, CryptoKeyProviderPlugin> loadCryptoFactories(List<CryptoKeyProviderPlugin> cryptoKPPlugins) { + return super.loadCryptoFactories(cryptoKPPlugins); + } + } + + public void testInitRegistryWithDuplicateKPType() { + List<CryptoKeyProviderPlugin> cryptoPlugins = new ArrayList<>(); + CryptoKeyProviderPlugin cryptoPlugin1 = Mockito.mock(CryptoKeyProviderPlugin.class); + String pluginType = UUID.randomUUID().toString(); + Mockito.when(cryptoPlugin1.type()).thenReturn(pluginType); + cryptoPlugins.add(cryptoPlugin1); + CryptoKeyProviderPlugin cryptoPlugin2 = Mockito.mock(CryptoKeyProviderPlugin.class); +
Mockito.when(cryptoPlugin2.type()).thenReturn(pluginType); + cryptoPlugins.add(cryptoPlugin2); + expectThrows(IllegalArgumentException.class, () -> cryptoManagerRegistry.loadCryptoFactories(cryptoPlugins)); + } + + public void testRegistry() { + List<CryptoKeyProviderPlugin> cryptoPlugins = new ArrayList<>(); + CryptoKeyProviderPlugin cryptoPlugin1 = Mockito.mock(CryptoKeyProviderPlugin.class); + String pluginType1 = UUID.randomUUID().toString(); + Mockito.when(cryptoPlugin1.type()).thenReturn(pluginType1); + MasterKeyProvider masterKeyProvider1 = Mockito.mock(MasterKeyProvider.class); + Mockito.when(cryptoPlugin1.createKeyProvider(Mockito.any())).thenReturn(masterKeyProvider1); + cryptoPlugins.add(cryptoPlugin1); + + CryptoKeyProviderPlugin cryptoPlugin2 = Mockito.mock(CryptoKeyProviderPlugin.class); + String pluginType2 = UUID.randomUUID().toString(); + Mockito.when(cryptoPlugin2.type()).thenReturn(pluginType2); + MasterKeyProvider masterKeyProvider2 = Mockito.mock(MasterKeyProvider.class); + Mockito.when(cryptoPlugin2.createKeyProvider(Mockito.any())).thenReturn(masterKeyProvider2); + cryptoPlugins.add(cryptoPlugin2); + + Map<String, CryptoKeyProviderPlugin> loadedPlugins = cryptoManagerRegistry.loadCryptoFactories(cryptoPlugins); + + CryptoKeyProviderPlugin keyProviderPlugin = loadedPlugins.get(pluginType1); + assertNotNull(keyProviderPlugin); + assertEquals(cryptoPlugin1, keyProviderPlugin); + + keyProviderPlugin = loadedPlugins.get(pluginType2); + assertNotNull(keyProviderPlugin); + assertEquals(cryptoPlugin2, keyProviderPlugin); + } + + public void testCryptoManagerMissing() { + String pluginName = UUID.randomUUID().toString(); + String pluginType = UUID.randomUUID().toString(); + CryptoMetadata cryptoMetadata = new CryptoMetadata(pluginName, pluginType, Settings.EMPTY); + expectThrows(CryptoRegistryException.class, () -> cryptoManagerRegistry.fetchCryptoHandler(cryptoMetadata)); + } + + public void testCryptoManagerCreationFailure() { + String pluginName = UUID.randomUUID().toString(); + CryptoMetadata cryptoMetadata = new CryptoMetadata(pluginName, pluginTypeWithCreationFailure, Settings.EMPTY); + expectThrows(CryptoRegistryException.class, () -> cryptoManagerRegistry.fetchCryptoHandler(cryptoMetadata)); + } + + public void testCryptoManagerCreationSuccess() { + + String pluginName1 = UUID.randomUUID().toString(); + CryptoMetadata cryptoMetadata = new CryptoMetadata(pluginName1, cryptoPlugin1.type(), Settings.EMPTY); + CryptoHandler cryptoHandler = cryptoManagerRegistry.fetchCryptoHandler(cryptoMetadata); + assertNotNull(cryptoHandler); + + String pluginName2 = UUID.randomUUID().toString(); + CryptoHandler cryptoHandler2 = cryptoManagerRegistry.fetchCryptoHandler( + new CryptoMetadata(pluginName2, cryptoPlugin2.type(), Settings.EMPTY) + ); + assertNotNull(cryptoHandler2); + CryptoHandler cryptoHandler3 = cryptoManagerRegistry.fetchCryptoHandler( + new CryptoMetadata(pluginName1, cryptoPlugin1.type(), Settings.EMPTY) + ); + assertNotNull(cryptoHandler3); + assertEquals(cryptoHandler, cryptoHandler3); + assertNotEquals(cryptoHandler2, cryptoHandler); + + CryptoHandler cryptoHandlerNewType = cryptoManagerRegistry.fetchCryptoHandler( + new CryptoMetadata(pluginName1, cryptoPlugin2.type(), Settings.EMPTY) + ); + assertNotNull(cryptoHandlerNewType); + assertNotEquals(cryptoHandler, cryptoHandlerNewType); + assertNotEquals(cryptoHandler2, cryptoHandlerNewType); + assertNotEquals(cryptoHandler3, cryptoHandlerNewType); + } +} diff --git a/server/src/test/java/org/opensearch/crypto/CryptoRegistryExceptionTests.java
diff --git a/server/src/test/java/org/opensearch/crypto/CryptoRegistryExceptionTests.java b/server/src/test/java/org/opensearch/crypto/CryptoRegistryExceptionTests.java
new file mode 100644
index 0000000000000..91f86a2b4104a
--- /dev/null
+++ b/server/src/test/java/org/opensearch/crypto/CryptoRegistryExceptionTests.java
@@ -0,0 +1,90 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.crypto;
+
+import org.opensearch.core.common.io.stream.InputStreamStreamInput;
+import org.opensearch.core.common.io.stream.OutputStreamStreamOutput;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.rest.RestStatus;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+
+public class CryptoRegistryExceptionTests extends OpenSearchTestCase {
+
+    public void testConstructorWithClientNameAndType() {
+        String clientName = "test-client";
+        String clientType = "test-type";
+        CryptoRegistryException exception = new CryptoRegistryException(clientName, clientType);
+
+        assertEquals(RestStatus.NOT_FOUND, exception.status());
+        assertEquals(clientName, exception.getName());
+        assertEquals(clientType, exception.getType());
+    }
+
+    public void testConstructorWithClientNameTypeAndCause() {
+        String clientName = "test-client";
+        String clientType = "test-type";
+        String causeMessage = "Something went wrong.";
+        Throwable cause = new Throwable(causeMessage);
+        CryptoRegistryException exception = new CryptoRegistryException(clientName, clientType, cause);
+
+        assertEquals(RestStatus.INTERNAL_SERVER_ERROR, exception.status());
+        assertEquals(clientName, exception.getName());
+        assertEquals(clientType, exception.getType());
+        assertEquals(cause, exception.getCause());
+    }
+
+    public void testConstructorWithClientNameTypeAndIllegalArgsCause() {
+        String clientName = "test-client";
+        String clientType = "test-type";
+        String causeMessage = "Bad arguments.";
+        IllegalArgumentException cause = new IllegalArgumentException(causeMessage);
+        CryptoRegistryException exception = new CryptoRegistryException(clientName, clientType, cause);
+
+        assertEquals(RestStatus.BAD_REQUEST, exception.status());
+        assertEquals(clientName, exception.getName());
+        assertEquals(clientType, exception.getType());
+        assertEquals(cause, exception.getCause());
+    }
+
+    public void testConstructorWithClientNameTypeAndCustomMessage() {
+        String clientName = "TestClient";
+        String clientType = "TestType";
+        String customMessage = "Invalid client data.";
+        CryptoRegistryException exception = new CryptoRegistryException(clientName, clientType, customMessage);
+
+        assertEquals(RestStatus.INTERNAL_SERVER_ERROR, exception.status());
+        assertEquals(clientName, exception.getName());
+        assertEquals(clientType, exception.getType());
+    }
+
+    public void testSerializationAndDeserialization() throws IOException {
+        String clientName = "TestClient";
+        String clientType = "TestType";
+        CryptoRegistryException originalException = new CryptoRegistryException(clientName, clientType);
+        ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
+        StreamOutput streamOutput = new OutputStreamStreamOutput(outputStream);
+        originalException.writeTo(streamOutput);
+
+        byte[] byteArray = outputStream.toByteArray();
+        ByteArrayInputStream inputStream = new ByteArrayInputStream(byteArray);
+        StreamInput streamInput = new InputStreamStreamInput(inputStream);
+        CryptoRegistryException deserializedException = new CryptoRegistryException(streamInput);
+
+        assertEquals(originalException.getMessage(), deserializedException.getMessage());
+        assertEquals(originalException.status(), deserializedException.status());
+        assertEquals(originalException.getName(), deserializedException.getName());
+        assertEquals(originalException.getType(), deserializedException.getType());
+    }
+}
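The status assertions in this new test encode a cause-to-HTTP-status convention: no cause means the named client simply was not found (404), an `IllegalArgumentException` cause signals bad caller input (400), and any other cause is a server-side failure (500). A hypothetical sketch of that mapping, deliberately independent of the real exception class:

```java
// Stand-in enum; the real RestStatus lives in org.opensearch.core.rest.
enum Status { NOT_FOUND, BAD_REQUEST, INTERNAL_SERVER_ERROR }

final class StatusMapping {
    private StatusMapping() {}

    // Mirrors the convention asserted by the constructor tests above.
    static Status statusFor(Throwable cause) {
        if (cause == null) {
            return Status.NOT_FOUND;             // no such client registered
        }
        if (cause instanceof IllegalArgumentException) {
            return Status.BAD_REQUEST;           // caller supplied bad metadata
        }
        return Status.INTERNAL_SERVER_ERROR;     // creation failed for another reason
    }
}
```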
diff --git a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java
index 4a987c9a6fe02..b33ebf8333b36 100644
--- a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java
+++ b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java
@@ -34,6 +34,7 @@
 import org.opensearch.Version;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.coordination.Coordinator;
+import org.opensearch.cluster.coordination.PersistedStateRegistry;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.routing.RerouteService;
 import org.opensearch.cluster.service.ClusterApplier;
@@ -45,7 +46,9 @@
 import org.opensearch.common.util.io.IOUtils;
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.gateway.GatewayMetaState;
+import org.opensearch.node.remotestore.RemoteStoreNodeService;
 import org.opensearch.plugins.DiscoveryPlugin;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.MockTransportService;
 import org.opensearch.threadpool.ThreadPool;
@@ -76,6 +79,8 @@ public class DiscoveryModuleTests extends OpenSearchTestCase {
     private ClusterSettings clusterSettings;
     private GatewayMetaState gatewayMetaState;
 
+    private RemoteStoreNodeService remoteStoreNodeService;
+
     public interface DummyHostsProviderPlugin extends DiscoveryPlugin {
         Map<String, Supplier<SeedHostsProvider>> impl();
@@ -92,12 +97,13 @@ default Map<String, Supplier<SeedHostsProvider>> getSeedHostProviders(
     public void setupDummyServices() {
         threadPool = mock(ThreadPool.class);
         when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
-        transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null);
+        transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, NoopTracer.INSTANCE);
         clusterManagerService = mock(ClusterManagerService.class);
         namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList());
         clusterApplier = mock(ClusterApplier.class);
         clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
         gatewayMetaState = mock(GatewayMetaState.class);
+        remoteStoreNodeService = mock(RemoteStoreNodeService.class);
     }
 
     @After
@@ -120,7 +126,9 @@ private DiscoveryModule newModule(Settings settings, List<DiscoveryPlugin> plugi
             createTempDir().toAbsolutePath(),
             gatewayMetaState,
             mock(RerouteService.class),
-            null
+            null,
+            new PersistedStateRegistry(),
+            remoteStoreNodeService
         );
     }
diff --git a/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java b/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java
index ac2bcfe92ebaf..f4515361a89b8 100644
--- a/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java
+++ b/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java
@@ -42,6
+42,7 @@ import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -99,7 +100,8 @@ private void createTransportSvc() { new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + NoopTracer.INSTANCE ) { @Override public BoundTransportAddress boundAddress() { @@ -114,7 +116,8 @@ public BoundTransportAddress boundAddress() { transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, - null + null, + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java b/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java index cf53bd9251b65..0d694bcfa135b 100644 --- a/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java +++ b/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java @@ -44,6 +44,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; @@ -122,7 +123,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req TransportService.NOOP_TRANSPORT_INTERCEPTOR, address -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); transportService.start(); diff --git a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java index 76520f9684c23..f861ab90896db 100644 --- a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java @@ -44,6 +44,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.discovery.PeerFinder.TransportAddressConnector; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.test.transport.CapturingTransport.CapturedRequest; @@ -242,7 +243,8 @@ public void setup() { boundTransportAddress -> localNode, null, emptySet(), - connectionManager + connectionManager, + NoopTracer.INSTANCE ); transportService.start(); diff --git a/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java b/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java index d2a1be87388ad..421f6c6fe279b 100644 --- a/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java +++ b/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java @@ -48,6 +48,7 @@ import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.core.common.transport.TransportAddress; import 
org.opensearch.core.indices.breaker.NoneCircuitBreakerService;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
@@ -184,7 +185,8 @@ public void testRemovingLocalAddresses() {
             networkService,
             PageCacheRecycler.NON_RECYCLING_INSTANCE,
             new NamedWriteableRegistry(Collections.emptyList()),
-            new NoneCircuitBreakerService()
+            new NoneCircuitBreakerService(),
+            NoopTracer.INSTANCE
         ) {
 
             @Override
@@ -203,7 +205,8 @@ public BoundTransportAddress boundAddress() {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> null,
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         closeables.push(transportService);
         final List<TransportAddress> transportAddresses = SeedHostsResolver.resolveHostsLists(
@@ -235,7 +238,8 @@ public void testUnknownHost() {
             networkService,
             PageCacheRecycler.NON_RECYCLING_INSTANCE,
             new NamedWriteableRegistry(Collections.emptyList()),
-            new NoneCircuitBreakerService()
+            new NoneCircuitBreakerService(),
+            NoopTracer.INSTANCE
         ) {
 
             @Override
@@ -261,7 +265,8 @@ public TransportAddress[] addressesFromString(String address) throws UnknownHost
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> null,
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         closeables.push(transportService);
@@ -289,7 +294,8 @@ public void testResolveTimeout() {
             networkService,
             PageCacheRecycler.NON_RECYCLING_INSTANCE,
             new NamedWriteableRegistry(Collections.emptyList()),
-            new NoneCircuitBreakerService()
+            new NoneCircuitBreakerService(),
+            NoopTracer.INSTANCE
         ) {
 
             @Override
@@ -326,7 +332,8 @@ public TransportAddress[] addressesFromString(String address) throws UnknownHost
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> null,
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         closeables.push(transportService);
         final TimeValue resolveTimeout = TimeValue.timeValueSeconds(randomIntBetween(3, 5));
@@ -364,7 +371,8 @@ public void testCancellationOnClose() throws InterruptedException {
             networkService,
             PageCacheRecycler.NON_RECYCLING_INSTANCE,
             new NamedWriteableRegistry(Collections.emptyList()),
-            new NoneCircuitBreakerService()
+            new NoneCircuitBreakerService(),
+            NoopTracer.INSTANCE
         ) {
 
             @Override
@@ -402,7 +410,8 @@ public TransportAddress[] addressesFromString(String address) throws UnknownHost
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> null,
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         closeables.push(transportService);
         recreateSeedHostsResolver(
@@ -427,7 +436,8 @@ public void testInvalidHosts() throws IllegalAccessException {
             new NetworkService(Collections.emptyList()),
             PageCacheRecycler.NON_RECYCLING_INSTANCE,
             new NamedWriteableRegistry(Collections.emptyList()),
-            new NoneCircuitBreakerService()
+            new NoneCircuitBreakerService(),
+            NoopTracer.INSTANCE
         ) {
             @Override
             public BoundTransportAddress boundAddress() {
@@ -446,7 +456,8 @@ public BoundTransportAddress boundAddress() {
             TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> null,
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         closeables.push(transportService);
         final List<TransportAddress> transportAddresses = SeedHostsResolver.resolveHostsLists(
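A pattern repeated across these test diffs is a new tracer parameter threaded through every transport construction, with `NoopTracer.INSTANCE` supplied in tests so telemetry stays out of the way. A hypothetical sketch of that dependency-injection shape (the types here are stand-ins, not the OpenSearch classes):

```java
// Hypothetical stand-ins illustrating "inject a no-op tracer in tests".
interface Tracer {
    void startSpan(String name);

    // A shared do-nothing instance, mirroring the NoopTracer.INSTANCE singleton style.
    Tracer NOOP = name -> {};
}

class TransportServiceSketch {
    private final Tracer tracer;

    TransportServiceSketch(Tracer tracer) {
        this.tracer = tracer; // production wires a real tracer; tests pass Tracer.NOOP
    }

    void sendRequest(String action) {
        tracer.startSpan(action); // no-op in tests, a real span in production
        // ... actual transport work ...
    }
}
```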
diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java
index 1a9d1e3b4acfa..41b0d8f7a45e7 100644
--- a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java
+++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java
@@ -36,6 +36,7 @@
 import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.core.indices.breaker.NoneCircuitBreakerService;
 import org.opensearch.core.transport.TransportResponse;
+import org.opensearch.discovery.InitializeExtensionRequest;
 import org.opensearch.env.Environment;
 import org.opensearch.env.EnvironmentSettingsResponse;
 import org.opensearch.extensions.ExtensionsSettings.Extension;
@@ -45,6 +46,7 @@
 import org.opensearch.identity.IdentityService;
 import org.opensearch.plugins.ExtensionAwarePlugin;
 import org.opensearch.rest.RestController;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.FeatureFlagSetter;
 import org.opensearch.test.MockLogAppender;
 import org.opensearch.test.OpenSearchTestCase;
@@ -76,6 +78,7 @@
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
@@ -112,7 +115,8 @@ public void setup() throws Exception {
             new NetworkService(Collections.emptyList()),
             PageCacheRecycler.NON_RECYCLING_INSTANCE,
             new NamedWriteableRegistry(Collections.emptyList()),
-            new NoneCircuitBreakerService()
+            new NoneCircuitBreakerService(),
+            NoopTracer.INSTANCE
         );
         transportService = new MockTransportService(
             settings,
@@ -128,7 +132,8 @@
                 Version.CURRENT
             ),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         actionModule = mock(ActionModule.class);
         extAwarePlugin = new ExtensionAwarePlugin() {
@@ -406,19 +411,93 @@ public void testInitialize() throws Exception {
             )
         );
 
-        // Test needs to be changed to mock the connection between the local node and an extension. Assert statment is commented out for
-        // now.
+        // Test needs to be changed to mock the connection between the local node and an extension.
         // Link to issue: https://github.com/opensearch-project/OpenSearch/issues/4045
         // mockLogAppender.assertAllExpectationsMatched();
         }
     }
 
+    public void testInitializeExtension() throws Exception {
+        ExtensionsManager extensionsManager = new ExtensionsManager(Set.of());
+
+        TransportService mockTransportService = spy(
+            new TransportService(
+                Settings.EMPTY,
+                mock(Transport.class),
+                threadPool,
+                TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+                x -> null,
+                null,
+                Collections.emptySet(),
+                NoopTracer.INSTANCE
+            )
+        );
+
+        doNothing().when(mockTransportService).connectToExtensionNode(any(DiscoveryExtensionNode.class));
+
+        doNothing().when(mockTransportService)
+            .sendRequest(any(DiscoveryExtensionNode.class), anyString(), any(InitializeExtensionRequest.class), any());
+
+        extensionsManager.initializeServicesAndRestHandler(
+            actionModule,
+            settingsModule,
+            mockTransportService,
+            clusterService,
+            settings,
+            client
+        );
+
+        Extension firstExtension = new Extension(
+            "firstExtension",
+            "uniqueid1",
+            "127.0.0.0",
+            "9301",
+            "0.0.7",
+            "2.0.0",
+            "2.0.0",
+            List.of(),
+            null
+        );
+
+        extensionsManager.initializeExtension(firstExtension);
+
+        Extension secondExtension = new Extension(
+            "secondExtension",
+            "uniqueid2",
+            "127.0.0.0",
+            "9301",
+            "0.0.7",
+            "2.0.0",
+            "2.0.0",
+            List.of(),
+            null
+        );
+
+        extensionsManager.initializeExtension(secondExtension);
+
+        ThreadPool.terminate(threadPool, 3, TimeUnit.SECONDS);
+
+        verify(mockTransportService, times(2)).connectToExtensionNode(any(DiscoveryExtensionNode.class));
+
+        verify(mockTransportService, times(2)).sendRequest(
+            any(DiscoveryExtensionNode.class),
+            anyString(),
+            any(InitializeExtensionRequest.class),
+            any()
+        );
+    }
+
     public void testHandleRegisterRestActionsRequest() throws Exception {
         ExtensionsManager extensionsManager = new ExtensionsManager(Set.of());
         initialize(extensionsManager);
 
         String uniqueIdStr = "uniqueid1";
+
+        extensionsManager.loadExtension(
+            new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "2.8.0", "2.8.0", List.of(), null)
+        );
+
         List<String> actionsList = List.of("GET /foo foo", "PUT /bar bar", "POST /baz baz");
         List<String> deprecatedActionsList = List.of("GET /deprecated/foo foo_deprecated", "It's deprecated!");
         RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList);
@@ -428,6 +507,58 @@ public void testHandleRegisterRestActionsRequest() throws Exception {
         assertTrue(((AcknowledgedResponse) response).getStatus());
     }
 
+    public void testHandleRegisterRestActionsRequestRequiresDiscoveryNode() throws Exception {
+
+        ExtensionsManager extensionsManager = new ExtensionsManager(Set.of());
+        initialize(extensionsManager);
+
+        RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest("uniqueId1", List.of(), List.of());
+
+        expectThrows(
+            IllegalStateException.class,
+            () -> extensionsManager.getRestActionsRequestHandler()
+                .handleRegisterRestActionsRequest(registerActionsRequest, actionModule.getDynamicActionRegistry())
+        );
+    }
+
+    public void testHandleRegisterRestActionsRequestMultiple() throws Exception {
+
+        ExtensionsManager extensionsManager = new ExtensionsManager(Set.of());
+        initialize(extensionsManager);
+
+        List<String> actionsList = List.of("GET /foo foo", "PUT /bar bar", "POST /baz baz");
+        List<String> deprecatedActionsList = List.of("GET /deprecated/foo foo_deprecated", "It's deprecated!");
+        for (int i = 0; i < 2; i++) {
+            String uniqueIdStr = "uniqueid-%d" + i;
+
+            Set<Setting<?>> additionalSettings = extAwarePlugin.getExtensionSettings().stream().collect(Collectors.toSet());
+            ExtensionScopedSettings extensionScopedSettings = new ExtensionScopedSettings(additionalSettings);
+            Extension firstExtension = new Extension(
+                "Extension %s" + i,
+                uniqueIdStr,
+                "127.0.0.0",
+                "9300",
+                "0.0.7",
+                "2.8.0",
+                "2.8.0",
+                List.of(),
+                extensionScopedSettings
+            );
+
+            extensionsManager.loadExtension(firstExtension);
+
+            RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(
+                uniqueIdStr,
+                actionsList,
+                deprecatedActionsList
+            );
+            TransportResponse response = extensionsManager.getRestActionsRequestHandler()
+                .handleRegisterRestActionsRequest(registerActionsRequest, actionModule.getDynamicActionRegistry());
+            assertEquals(AcknowledgedResponse.class, response.getClass());
+            assertTrue(((AcknowledgedResponse) response).getStatus());
+        }
+    }
+
     public void testHandleRegisterSettingsRequest() throws Exception {
         ExtensionsManager extensionsManager = new ExtensionsManager(Set.of());
         initialize(extensionsManager);
@@ -449,6 +580,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidMethod() throws Excep
         initialize(extensionsManager);
 
         String uniqueIdStr = "uniqueid1";
+        extensionsManager.loadExtension(
+            new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "2.8.0", "2.8.0", List.of(), null)
+        );
         List<String> actionsList = List.of("FOO /foo", "PUT /bar", "POST /baz");
         List<String> deprecatedActionsList = List.of("GET /deprecated/foo", "It's deprecated!");
         RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList);
@@ -464,6 +598,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedMethod() th
         initialize(extensionsManager);
 
         String uniqueIdStr = "uniqueid1";
+        extensionsManager.loadExtension(
+            new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "2.8.0", "2.8.0", List.of(), null)
+        );
         List<String> actionsList = List.of("GET /foo", "PUT /bar", "POST /baz");
         List<String> deprecatedActionsList = List.of("FOO /deprecated/foo", "It's deprecated!");
         RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList);
@@ -478,6 +615,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidUri() throws Exceptio
         ExtensionsManager extensionsManager = new ExtensionsManager(Set.of());
         initialize(extensionsManager);
         String uniqueIdStr = "uniqueid1";
+        extensionsManager.loadExtension(
+            new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "2.8.0", "2.8.0", List.of(), null)
+        );
         List<String> actionsList = List.of("GET", "PUT /bar", "POST /baz");
         List<String> deprecatedActionsList = List.of("GET /deprecated/foo", "It's deprecated!");
         RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList);
@@ -492,6 +632,9 @@ public void testHandleRegisterRestActionsRequestWithInvalidDeprecatedUri() throw
         ExtensionsManager extensionsManager = new ExtensionsManager(Set.of());
         initialize(extensionsManager);
         String uniqueIdStr = "uniqueid1";
+        extensionsManager.loadExtension(
+            new Extension("firstExtension", uniqueIdStr, "127.0.0.0", "9300", "0.0.7", "2.8.0", "2.8.0", List.of(), null)
+        );
         List<String> actionsList = List.of("GET /foo", "PUT /bar", "POST /baz");
         List<String> deprecatedActionsList = List.of("GET", "It's deprecated!");
         RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList);
@@ -760,7 +903,8 @@ public void testRegisterHandler() throws Exception {
                 TransportService.NOOP_TRANSPORT_INTERCEPTOR,
                 x -> null,
                 null,
-                Collections.emptySet()
+                Collections.emptySet(),
+                NoopTracer.INSTANCE
             )
         );
         extensionsManager.initializeServicesAndRestHandler(
@@ -783,7 +927,7 @@ public void testIncompatibleExtensionRegistration() throws IOException {
             "127.0.0.0",
             "9300",
             "0.0.7",
-            "3.0.0",
+            "2.8.0",
             "3.99.0",
             List.of(),
             null
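A recurring theme in the ExtensionsManager changes is that REST action registration now requires the extension to have been loaded first: the tests call `loadExtension(...)` before `handleRegisterRestActionsRequest(...)`, and an unknown `uniqueId` fails with `IllegalStateException` (see `testHandleRegisterRestActionsRequestRequiresDiscoveryNode`). A minimal sketch of that guard, with hypothetical names:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical sketch of the "load before register" guard these tests exercise.
class ExtensionRegistrySketch {
    private final Map<String, Object> loadedExtensions = new HashMap<>();

    void loadExtension(String uniqueId, Object extension) {
        loadedExtensions.put(uniqueId, extension);
    }

    void registerRestActions(String uniqueId, List<String> actions) {
        if (!loadedExtensions.containsKey(uniqueId)) {
            // Mirrors the IllegalStateException path asserted above.
            throw new IllegalStateException("Extension [" + uniqueId + "] has not been loaded");
        }
        // ... parse "METHOD /path name" entries and register the routes ...
    }
}
```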
testRestInitializeExtensionActionResponseWithAdditionalSettings() th // optionally, you can stub out some methods: when(spy.getAdditionalSettings()).thenCallRealMethod(); - Mockito.doCallRealMethod().when(spy).loadExtension(any(ExtensionsSettings.Extension.class)); - Mockito.doNothing().when(spy).initialize(); + Mockito.doCallRealMethod().when(spy).loadExtension(any(Extension.class)); + Mockito.doNothing().when(spy).initializeExtensionNode(any(DiscoveryExtensionNode.class)); RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(spy); final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" @@ -171,10 +175,10 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettings() th FakeRestChannel channel = new FakeRestChannel(request, false, 0); restInitializeExtensionAction.handleRequest(request, channel, null); - assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); + assertEquals(RestStatus.ACCEPTED, channel.capturedResponse().status()); assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); - Optional extension = spy.lookupExtensionSettingsById("ad-extension"); + Optional extension = spy.lookupExtensionSettingsById("ad-extension"); assertTrue(extension.isPresent()); assertEquals(true, extension.get().getAdditionalSettings().get(boolSetting)); assertEquals("customSetting", extension.get().getAdditionalSettings().get(stringSetting)); @@ -201,8 +205,8 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettingsUsing // optionally, you can stub out some methods: when(spy.getAdditionalSettings()).thenCallRealMethod(); - Mockito.doCallRealMethod().when(spy).loadExtension(any(ExtensionsSettings.Extension.class)); - Mockito.doNothing().when(spy).initialize(); + Mockito.doCallRealMethod().when(spy).loadExtension(any(Extension.class)); + Mockito.doNothing().when(spy).initializeExtensionNode(any(DiscoveryExtensionNode.class)); RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(spy); final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" @@ -218,10 +222,10 @@ public void testRestInitializeExtensionActionResponseWithAdditionalSettingsUsing FakeRestChannel channel = new FakeRestChannel(request, false, 0); restInitializeExtensionAction.handleRequest(request, channel, null); - assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); + assertEquals(RestStatus.ACCEPTED, channel.capturedResponse().status()); assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); - Optional extension = spy.lookupExtensionSettingsById("ad-extension"); + Optional extension = spy.lookupExtensionSettingsById("ad-extension"); assertTrue(extension.isPresent()); assertEquals(false, extension.get().getAdditionalSettings().get(boolSetting)); assertEquals("default", extension.get().getAdditionalSettings().get(stringSetting)); diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java index 350e23f060383..0fb8c146e8ba4 100644 --- 
diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java
index 350e23f060383..0fb8c146e8ba4 100644
--- a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java
+++ b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java
@@ -32,6 +32,7 @@
 import org.opensearch.rest.NamedRoute;
 import org.opensearch.rest.RestHandler.Route;
 import org.opensearch.rest.RestRequest.Method;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.transport.MockTransportService;
 import org.opensearch.threadpool.TestThreadPool;
@@ -77,7 +78,8 @@ public void setup() throws Exception {
             new NetworkService(Collections.emptyList()),
             PageCacheRecycler.NON_RECYCLING_INSTANCE,
             new NamedWriteableRegistry(Collections.emptyList()),
-            new NoneCircuitBreakerService()
+            new NoneCircuitBreakerService(),
+            NoopTracer.INSTANCE
         );
         transportService = new MockTransportService(
             settings,
@@ -93,7 +95,8 @@
                 Version.CURRENT
             ),
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         discoveryExtensionNode = new DiscoveryExtensionNode(
             "firstExtension",
@@ -145,7 +148,7 @@ public void testRestSendToExtensionAction() throws Exception {
             dynamicActionRegistry
         );
 
-        assertEquals("send_to_extension_action", restSendToExtensionAction.getName());
+        assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName());
         List<Route> expected = new ArrayList<>();
         String uriPrefix = "/_extensions/_uniqueid1";
         expected.add(new Route(Method.GET, uriPrefix + "/foo"));
@@ -177,7 +180,7 @@ public void testRestSendToExtensionActionWithNamedRoute() throws Exception {
             dynamicActionRegistry
         );
 
-        assertEquals("send_to_extension_action", restSendToExtensionAction.getName());
+        assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName());
         List<Route> expected = new ArrayList<>();
         String uriPrefix = "/_extensions/_uniqueid1";
         NamedRoute nr1 = new NamedRoute.Builder().method(Method.GET).path(uriPrefix + "/foo").uniqueName("foo").build();
@@ -222,7 +225,7 @@ public void testRestSendToExtensionActionWithNamedRouteAndLegacyActionName() thr
             dynamicActionRegistry
         );
 
-        assertEquals("send_to_extension_action", restSendToExtensionAction.getName());
+        assertEquals("uniqueid1:send_to_extension_action", restSendToExtensionAction.getName());
         List<Route> expected = new ArrayList<>();
         String uriPrefix = "/_extensions/_uniqueid1";
         NamedRoute nr1 = new NamedRoute.Builder().method(Method.GET)
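The renamed expectation (`uniqueid1:send_to_extension_action`) reflects that dynamically registered extension actions are now namespaced by the extension's unique ID, so two extensions cannot collide on the same action name. A hypothetical sketch of that naming rule:

```java
// Hypothetical helper: namespace a dynamically registered action name by extension ID.
final class ActionNaming {
    private ActionNaming() {}

    static String qualifiedActionName(String extensionUniqueId, String baseName) {
        // qualifiedActionName("uniqueid1", "send_to_extension_action")
        //     -> "uniqueid1:send_to_extension_action"
        return extensionUniqueId + ":" + baseName;
    }
}
```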
diff --git a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java
index c83da46b23fb1..1c43bb565ef69 100644
--- a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java
+++ b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java
@@ -40,6 +40,11 @@
 import org.opensearch.cluster.metadata.MetadataIndexStateService;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.node.DiscoveryNodeRole;
+import org.opensearch.cluster.routing.IndexRoutingTable;
+import org.opensearch.cluster.routing.IndexShardRoutingTable;
+import org.opensearch.cluster.routing.RecoverySource;
+import org.opensearch.cluster.routing.RoutingTable;
+import org.opensearch.cluster.routing.UnassignedInfo;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.UUIDs;
 import org.opensearch.common.settings.ClusterSettings;
@@ -48,10 +53,14 @@
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.set.Sets;
 import org.opensearch.core.index.Index;
+import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.repositories.IndexId;
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Set;
 import java.util.function.BiConsumer;
 import java.util.function.Function;
@@ -269,6 +278,108 @@ public void testUpdateRoutingTable() {
         }
     }
 
+    public void testRoutingTableUpdateWhenRemoteStateRecovery() {
+        final int numOfShards = randomIntBetween(1, 10);
+
+        final IndexMetadata remoteMetadata = createIndexMetadata(
+            "test-remote",
+            Settings.builder()
+                .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true)
+                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards)
+                .build()
+        );
+
+        // Test remote index routing table is generated with ExistingStoreRecoverySource
+        {
+            final Index index = remoteMetadata.getIndex();
+            final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE)
+                .metadata(Metadata.builder().put(remoteMetadata, false).build())
+                .build();
+            final ClusterState newState = updateRoutingTable(initialState);
+            IndexRoutingTable newRemoteIndexRoutingTable = newState.routingTable().index(remoteMetadata.getIndex());
+            assertTrue(newState.routingTable().hasIndex(index));
+            assertEquals(
+                0,
+                newRemoteIndexRoutingTable.shardsMatchingPredicateCount(
+                    shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.INDEX_CREATED)
+                )
+            );
+            assertEquals(
+                numOfShards,
+                newRemoteIndexRoutingTable.shardsMatchingPredicateCount(
+                    shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED)
+                )
+            );
+            assertEquals(
+                0,
+                newRemoteIndexRoutingTable.shardsMatchingPredicateCount(
+                    shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource
+                )
+            );
+            assertEquals(
+                numOfShards,
+                newRemoteIndexRoutingTable.shardsMatchingPredicateCount(
+                    shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource
+                )
+            );
+
+        }
+
+        // Test remote index routing table is overridden if recovery source is RemoteStoreRecoverySource
+        {
+            final Index index = remoteMetadata.getIndex();
+            Map<ShardId, IndexShardRoutingTable> routingTableMap = new HashMap<>();
+            for (int shardNumber = 0; shardNumber < remoteMetadata.getNumberOfShards(); shardNumber++) {
+                ShardId shardId = new ShardId(index, shardNumber);
+                routingTableMap.put(shardId, new IndexShardRoutingTable.Builder(new ShardId(remoteMetadata.getIndex(), 1)).build());
+            }
+            IndexRoutingTable.Builder remoteBuilderWithRemoteRecovery = new IndexRoutingTable.Builder(remoteMetadata.getIndex())
+                .initializeAsRemoteStoreRestore(
+                    remoteMetadata,
+                    new RecoverySource.RemoteStoreRecoverySource(
+                        UUIDs.randomBase64UUID(),
+                        remoteMetadata.getCreationVersion(),
+                        new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID())
+                    ),
+                    routingTableMap,
+                    true
+                );
+            final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE)
+                .metadata(Metadata.builder().put(remoteMetadata, false).build())
+                .routingTable(new RoutingTable.Builder().add(remoteBuilderWithRemoteRecovery.build()).build())
+                .build();
+            assertTrue(initialState.routingTable().hasIndex(index));
+            final ClusterState newState = updateRoutingTable(initialState);
+            IndexRoutingTable newRemoteIndexRoutingTable = newState.routingTable().index(remoteMetadata.getIndex());
+            assertTrue(newState.routingTable().hasIndex(index));
+            assertEquals(
+                0,
+                newRemoteIndexRoutingTable.shardsMatchingPredicateCount(
+                    shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.INDEX_CREATED)
+                )
+            );
+            assertEquals(
+                numOfShards,
+                newRemoteIndexRoutingTable.shardsMatchingPredicateCount(
+                    shardRouting -> shardRouting.unassignedInfo().getReason().equals(UnassignedInfo.Reason.CLUSTER_RECOVERED)
+                )
+            );
+            assertEquals(
+                0,
+                newRemoteIndexRoutingTable.shardsMatchingPredicateCount(
+                    shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.RemoteStoreRecoverySource
+                )
+            );
+            assertEquals(
+                numOfShards,
+                newRemoteIndexRoutingTable.shardsMatchingPredicateCount(
+                    shardRouting -> shardRouting.recoverySource() instanceof RecoverySource.EmptyStoreRecoverySource
+                )
+            );
+
+        }
+    }
+
     public void testMixCurrentAndRecoveredState() {
         final ClusterState currentState = ClusterState.builder(ClusterState.EMPTY_STATE)
             .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK).build())
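Both blocks of `testRoutingTableUpdateWhenRemoteStateRecovery` assert the same post-condition from two different starting points: after cluster-state recovery, every shard of the remote index carries `CLUSTER_RECOVERED` as its unassigned reason and an empty-store recovery source, even when the incoming routing table claimed a `RemoteStoreRecoverySource`. A small sketch of the predicate-counting idiom those assertions lean on, with a hypothetical shard type:

```java
import java.util.List;
import java.util.function.Predicate;

// Hypothetical shard-routing stand-in; the real ShardRouting carries far more state.
class ShardSketch {
    final String unassignedReason; // e.g. "CLUSTER_RECOVERED" or "INDEX_CREATED"
    final String recoverySource;   // e.g. "EMPTY_STORE" or "REMOTE_STORE"

    ShardSketch(String unassignedReason, String recoverySource) {
        this.unassignedReason = unassignedReason;
        this.recoverySource = recoverySource;
    }
}

final class PredicateCount {
    private PredicateCount() {}

    // Mirrors the shardsMatchingPredicateCount idiom: count the shards satisfying a predicate,
    // then assert the count is either 0 or the full shard count.
    static long shardsMatching(List<ShardSketch> shards, Predicate<ShardSketch> predicate) {
        return shards.stream().filter(predicate).count();
    }
}
```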
diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java
index c6b44eaa9d364..74bae7b5eb7cf 100644
--- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java
+++ b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java
@@ -35,12 +35,16 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.tests.store.MockDirectoryWrapper;
 import org.opensearch.ExceptionsHelper;
+import org.opensearch.OpenSearchException;
 import org.opensearch.Version;
 import org.opensearch.cluster.ClusterName;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.coordination.CoordinationMetadata;
 import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion;
 import org.opensearch.cluster.coordination.CoordinationState;
+import org.opensearch.cluster.coordination.CoordinationState.PersistedState;
+import org.opensearch.cluster.coordination.PersistedStateRegistry;
+import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.metadata.Manifest;
 import org.opensearch.cluster.metadata.Metadata;
@@ -48,6 +52,7 @@
 import org.opensearch.cluster.node.DiscoveryNodeRole;
 import org.opensearch.cluster.node.DiscoveryNodes;
 import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.BigArrays;
@@ -59,7 +64,16 @@
 import org.opensearch.env.Environment;
 import org.opensearch.env.NodeEnvironment;
 import org.opensearch.env.TestEnvironment;
+import org.opensearch.gateway.GatewayMetaState.RemotePersistedState;
+import org.opensearch.gateway.PersistedClusterStateService.Writer;
+import org.opensearch.gateway.remote.ClusterMetadataManifest;
+import org.opensearch.gateway.remote.RemoteClusterStateService;
+import org.opensearch.gateway.remote.RemotePersistenceStats;
+import org.opensearch.index.recovery.RemoteStoreRestoreService;
+import org.opensearch.index.recovery.RemoteStoreRestoreService.RemoteRestoreResult;
 import org.opensearch.node.Node;
+import org.opensearch.repositories.RepositoriesService;
+import org.opensearch.repositories.fs.FsRepository;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
@@ -72,23 +86,44 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Locale;
+import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Supplier;
+
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mockito;
+
+import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT;
+import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled;
 import static org.opensearch.test.NodeRoles.nonClusterManagerNode;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.nullValue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.Mockito.doCallRealMethod;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoInteractions;
 import static org.mockito.Mockito.when;
 
 public class GatewayMetaStatePersistedStateTests extends OpenSearchTestCase {
+
     private NodeEnvironment nodeEnvironment;
     private ClusterName clusterName;
     private Settings settings;
     private DiscoveryNode localNode;
     private BigArrays bigArrays;
 
+    private MockGatewayMetaState gateway;
+
     @Override
     public void setUp() throws Exception {
         bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
@@ -108,14 +143,22 @@ public void setUp() throws Exception {
     @Override
     public void tearDown() throws Exception {
         nodeEnvironment.close();
+        IOUtils.close(gateway);
         super.tearDown();
     }
 
-    private CoordinationState.PersistedState newGatewayPersistedState() {
-        final MockGatewayMetaState gateway = new MockGatewayMetaState(localNode, bigArrays);
-        gateway.start(settings, nodeEnvironment, xContentRegistry());
+    private CoordinationState.PersistedState newGatewayPersistedState() throws IOException {
+        IOUtils.close(gateway);
+        gateway = new MockGatewayMetaState(localNode, bigArrays);
+        final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry();
+        gateway.start(settings, nodeEnvironment, xContentRegistry(), persistedStateRegistry);
         final CoordinationState.PersistedState persistedState = gateway.getPersistedState();
         assertThat(persistedState, instanceOf(GatewayMetaState.LucenePersistedState.class));
+        assertThat(
+            persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL),
+            instanceOf(GatewayMetaState.LucenePersistedState.class)
+        );
+        assertThat(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE), nullValue());
         return persistedState;
     }
 
@@ -412,7 +455,10 @@ public void testDataOnlyNodePersistence() throws Exception {
         cleanup.add(gateway);
         final TransportService transportService = mock(TransportService.class);
         TestThreadPool threadPool = new TestThreadPool("testMarkAcceptedConfigAsCommittedOnDataOnlyNode");
-        cleanup.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS));
+        cleanup.add(() -> {
+            ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
+            threadPool.shutdown();
+        });
         when(transportService.getThreadPool()).thenReturn(threadPool);
         ClusterService clusterService = mock(ClusterService.class);
         when(clusterService.getClusterSettings()).thenReturn(
@@ -425,6 +471,27 @@
             new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
             () -> 0L
         );
+        Supplier<RemoteClusterStateService> remoteClusterStateServiceSupplier = () -> {
+            if (isRemoteStoreClusterStateEnabled(settings)) {
+                return new RemoteClusterStateService(
+                    nodeEnvironment.nodeId(),
+                    () -> new RepositoriesService(
+                        settings,
+                        clusterService,
+                        transportService,
+                        Collections.emptyMap(),
+                        Collections.emptyMap(),
+                        transportService.getThreadPool()
+                    ),
+                    settings,
+                    new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
+                    () -> 0L,
+                    threadPool
+                );
+            } else {
+                return null;
+            }
+        };
         gateway.start(
             settings,
             transportService,
@@ -432,7 +499,10 @@
             new MetaStateService(nodeEnvironment, xContentRegistry()),
             null,
             null,
-            persistedClusterStateService
+            persistedClusterStateService,
+            remoteClusterStateServiceSupplier.get(),
+            new PersistedStateRegistry(),
+            null
         );
         final CoordinationState.PersistedState persistedState = gateway.getPersistedState();
         assertThat(persistedState, instanceOf(GatewayMetaState.AsyncLucenePersistedState.class));
@@ -647,6 +717,415 @@ Directory createDirectory(Path path) {
     }
 }
 
+    public void testRemotePersistedState() throws IOException {
+        final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class);
+        final ClusterMetadataManifest manifest = ClusterMetadataManifest.builder().clusterTerm(1L).stateVersion(5L).build();
+        final String previousClusterUUID = "prev-cluster-uuid";
+        Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any(), Mockito.any())).thenReturn(manifest);
+
+        Mockito.when(remoteClusterStateService.writeIncrementalMetadata(Mockito.any(), Mockito.any(), Mockito.any())).thenReturn(manifest);
+        CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService, previousClusterUUID);
+
+        assertThat(remotePersistedState.getLastAcceptedState(), nullValue());
+        assertThat(remotePersistedState.getCurrentTerm(), equalTo(0L));
+
+        final long clusterTerm = randomNonNegativeLong();
+        final ClusterState clusterState = createClusterState(
+            randomNonNegativeLong(),
+            Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build()
+        );
+
+        remotePersistedState.setLastAcceptedState(clusterState);
+        Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterState, previousClusterUUID);
+
+        assertThat(remotePersistedState.getLastAcceptedState(), equalTo(clusterState));
+        assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm));
+
+        final ClusterState secondClusterState = createClusterState(
+            randomNonNegativeLong(),
+            Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build()
+        );
+
+        remotePersistedState.setLastAcceptedState(secondClusterState);
+        Mockito.verify(remoteClusterStateService, times(1)).writeFullMetadata(secondClusterState, previousClusterUUID);
+
+        assertThat(remotePersistedState.getLastAcceptedState(), equalTo(secondClusterState));
+        assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm));
+
+        remotePersistedState.markLastAcceptedStateAsCommitted();
+        Mockito.verify(remoteClusterStateService, times(1)).markLastStateAsCommitted(Mockito.any(), Mockito.any());
+
+        assertThat(remotePersistedState.getLastAcceptedState(), equalTo(secondClusterState));
+        assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm));
+        assertThat(remotePersistedState.getLastAcceptedState().metadata().clusterUUIDCommitted(), equalTo(false));
+
+        final ClusterState thirdClusterState = ClusterState.builder(secondClusterState)
+            .metadata(Metadata.builder(secondClusterState.getMetadata()).clusterUUID(randomAlphaOfLength(10)).build())
+            .build();
+        remotePersistedState.setLastAcceptedState(thirdClusterState);
+        remotePersistedState.markLastAcceptedStateAsCommitted();
+        assertThat(remotePersistedState.getLastAcceptedState().metadata().clusterUUIDCommitted(), equalTo(true));
+    }
+
+    public void testRemotePersistedStateNotCommitted() throws IOException {
+        final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class);
+        final String previousClusterUUID = "prev-cluster-uuid";
+        final ClusterMetadataManifest manifest = ClusterMetadataManifest.builder()
+            .previousClusterUUID(previousClusterUUID)
+            .clusterTerm(1L)
+            .stateVersion(5L)
+            .build();
+        Mockito.when(remoteClusterStateService.getLatestClusterMetadataManifest(Mockito.any(), Mockito.any()))
+            .thenReturn(Optional.of(manifest));
+        Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any(), Mockito.any())).thenReturn(manifest);
+
+        Mockito.when(remoteClusterStateService.writeIncrementalMetadata(Mockito.any(), Mockito.any(), Mockito.any())).thenReturn(manifest);
+        CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(
+            remoteClusterStateService,
+            ClusterState.UNKNOWN_UUID
+        );
+
+        assertThat(remotePersistedState.getLastAcceptedState(), nullValue());
+        assertThat(remotePersistedState.getCurrentTerm(), equalTo(0L));
+
+        final long clusterTerm = randomNonNegativeLong();
+        ClusterState clusterState = createClusterState(
+            randomNonNegativeLong(),
+            Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build()
+        );
+        clusterState = ClusterState.builder(clusterState)
+            .metadata(Metadata.builder(clusterState.getMetadata()).clusterUUID(randomAlphaOfLength(10)).clusterUUIDCommitted(false).build())
+            .build();
+
+        remotePersistedState.setLastAcceptedState(clusterState);
+        ArgumentCaptor<String> previousClusterUUIDCaptor = ArgumentCaptor.forClass(String.class);
+        ArgumentCaptor<ClusterState> clusterStateCaptor = ArgumentCaptor.forClass(ClusterState.class);
+        Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterStateCaptor.capture(), previousClusterUUIDCaptor.capture());
+        assertEquals(previousClusterUUID, previousClusterUUIDCaptor.getValue());
+    }
+
+    public void testRemotePersistedStateExceptionOnFullStateUpload() throws IOException {
+        final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class);
+        final String previousClusterUUID = "prev-cluster-uuid";
+        Mockito.doThrow(IOException.class).when(remoteClusterStateService).writeFullMetadata(Mockito.any(), Mockito.any());
+
+        CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService, previousClusterUUID);
+
+        final long clusterTerm =
randomNonNegativeLong(); + final ClusterState clusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() + ); + + assertThrows(OpenSearchException.class, () -> remotePersistedState.setLastAcceptedState(clusterState)); + } + + public void testRemotePersistedStateFailureStats() throws IOException { + RemotePersistenceStats remoteStateStats = new RemotePersistenceStats(); + final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); + final String previousClusterUUID = "prev-cluster-uuid"; + Mockito.doThrow(IOException.class).when(remoteClusterStateService).writeFullMetadata(Mockito.any(), Mockito.any()); + when(remoteClusterStateService.getStats()).thenReturn(remoteStateStats); + doCallRealMethod().when(remoteClusterStateService).writeMetadataFailed(); + CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService, previousClusterUUID); + + final long clusterTerm = randomNonNegativeLong(); + final ClusterState clusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(clusterTerm).build()).build() + ); + + assertThrows(OpenSearchException.class, () -> remotePersistedState.setLastAcceptedState(clusterState)); + assertEquals(1, remoteClusterStateService.getStats().getFailedCount()); + assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); + } + + public void testGatewayForRemoteState() throws IOException { + MockGatewayMetaState gateway = null; + try { + RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + when(remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")).thenReturn("test-cluster-uuid"); + RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenReturn( + RemoteRestoreResult.build("test-cluster-uuid", null, ClusterState.EMPTY_STATE) + ); + gateway = new MockGatewayMetaState(localNode, bigArrays, remoteClusterStateService, remoteStoreRestoreService); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + "randomRepoName" + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + "randomRepoName" + ); + + Settings settings = Settings.builder() + .put("node.attr." 
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, "randomRepoName") + .put(stateRepoTypeAttributeKey, FsRepository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build(); + gateway.start(settings, nodeEnvironment, xContentRegistry(), persistedStateRegistry); + + final CoordinationState.PersistedState persistedState = gateway.getPersistedState(); + assertThat(persistedState, instanceOf(GatewayMetaState.LucenePersistedState.class)); + assertThat( + persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL), + instanceOf(GatewayMetaState.LucenePersistedState.class) + ); + assertThat( + persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE), + instanceOf(GatewayMetaState.RemotePersistedState.class) + ); + } finally { + IOUtils.close(gateway); + } + } + + public void testGatewayForRemoteStateForInitialBootstrap() throws IOException { + MockGatewayMetaState gateway = null; + try { + final RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + when(remoteClusterStateService.getLastKnownUUIDFromRemote(clusterName.value())).thenReturn(ClusterState.UNKNOWN_UUID); + + final RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenReturn( + RemoteRestoreResult.build("test-cluster-uuid", null, ClusterState.EMPTY_STATE) + ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + gateway = newGatewayForRemoteState( + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + ClusterState.EMPTY_STATE, + false + ); + final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); + PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); + verify(remoteClusterStateService).getLastKnownUUIDFromRemote(Mockito.any()); // change this + verifyNoInteractions(remoteStoreRestoreService); + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + assertThat(lucenePersistedState.getLastAcceptedState().metadata(), equalTo(ClusterState.EMPTY_STATE.metadata())); + } finally { + IOUtils.close(gateway); + } + } + + public void testGatewayForRemoteStateForNodeReplacement() throws IOException { + MockGatewayMetaState gateway = null; + try { + final RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + when(remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")).thenReturn("test-cluster-uuid"); + final ClusterState previousState = createClusterState( + randomNonNegativeLong(), + Metadata.builder() + .coordinationMetadata(CoordinationMetadata.builder().term(randomLong()).build()) + .put( + IndexMetadata.builder("test-index1") + .settings(settings(Version.CURRENT).put(SETTING_INDEX_UUID, randomAlphaOfLength(10))) + .numberOfShards(5) + .numberOfReplicas(1) + .build(), + false + ) + .clusterUUID(randomAlphaOfLength(10)) + .build() + ); + when(remoteClusterStateService.getLastKnownUUIDFromRemote(clusterName.value())).thenReturn( + previousState.metadata().clusterUUID() + ); + + final RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenReturn( + RemoteRestoreResult.build("test-cluster-uuid", null, 
previousState) + ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + gateway = newGatewayForRemoteState( + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + ClusterState.EMPTY_STATE, + false + ); + final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); + PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); + verify(remoteClusterStateService).getLastKnownUUIDFromRemote(Mockito.any()); + verify(remoteStoreRestoreService).restore(any(), any(), anyBoolean(), any()); + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + assertThat(lucenePersistedState.getLastAcceptedState().metadata(), equalTo(previousState.metadata())); + } finally { + IOUtils.close(gateway); + } + } + + public void testGatewayForRemoteStateForNodeReboot() throws IOException { + MockGatewayMetaState gateway = null; + try { + final RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + final RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + final IndexMetadata indexMetadata = IndexMetadata.builder("test-index1") + .settings(settings(Version.CURRENT).put(SETTING_INDEX_UUID, randomAlphaOfLength(10))) + .numberOfShards(5) + .numberOfReplicas(1) + .build(); + final ClusterState clusterState = createClusterState( + randomNonNegativeLong(), + Metadata.builder() + .coordinationMetadata(CoordinationMetadata.builder().term(randomLong()).build()) + .put(indexMetadata, false) + .clusterUUID(randomAlphaOfLength(10)) + .build() + ); + gateway = newGatewayForRemoteState( + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + clusterState, + false + ); + final CoordinationState.PersistedState lucenePersistedState = gateway.getPersistedState(); + PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); + verifyNoInteractions(remoteClusterStateService); + verifyNoInteractions(remoteStoreRestoreService); + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + logger.info("lucene state metadata: {}", lucenePersistedState.getLastAcceptedState().toString()); + logger.info("initial metadata: {}", clusterState.toString()); + assertThat(lucenePersistedState.getLastAcceptedState().metadata().indices().size(), equalTo(1)); + assertThat(lucenePersistedState.getLastAcceptedState().metadata().indices().get("test-index1"), equalTo(indexMetadata)); + } finally { + IOUtils.close(gateway); + } + } + + public void testGatewayForRemoteStateForInitialBootstrapBlocksApplied() throws IOException { + MockGatewayMetaState gateway = null; + try { + final RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + when(remoteClusterStateService.getLastKnownUUIDFromRemote(clusterName.value())).thenReturn("test-cluster-uuid"); + + final IndexMetadata indexMetadata = IndexMetadata.builder("test-index1") + .settings( + settings(Version.CURRENT).put(SETTING_INDEX_UUID, randomAlphaOfLength(10)) + .put(IndexMetadata.INDEX_READ_ONLY_SETTING.getKey(), true) + ) + .numberOfShards(5) + .numberOfReplicas(1) + .build(); + + final ClusterState clusterState = ClusterState.builder( + createClusterState( + randomNonNegativeLong(), + Metadata.builder() + 
.coordinationMetadata(CoordinationMetadata.builder().term(randomLong()).build()) + .put(indexMetadata, false) + .clusterUUID(ClusterState.UNKNOWN_UUID) + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build() + ) + ).nodes(DiscoveryNodes.EMPTY_NODES).build(); + + final RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenReturn( + RemoteRestoreResult.build("test-cluster-uuid", null, clusterState) + ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + gateway = newGatewayForRemoteState( + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + ClusterState.EMPTY_STATE, + true + ); + PersistedState remotePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE); + PersistedState lucenePersistedState = persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL); + verify(remoteClusterStateService).getLastKnownUUIDFromRemote(clusterName.value()); // change this + verify(remoteStoreRestoreService).restore(any(ClusterState.class), any(String.class), anyBoolean(), any(String[].class)); + assertThat(remotePersistedState.getLastAcceptedState(), nullValue()); + assertThat( + Metadata.isGlobalStateEquals(lucenePersistedState.getLastAcceptedState().metadata(), clusterState.metadata()), + equalTo(true) + ); + assertThat( + lucenePersistedState.getLastAcceptedState().blocks().hasGlobalBlock(Metadata.CLUSTER_READ_ONLY_BLOCK), + equalTo(true) + ); + assertThat( + IndexMetadata.INDEX_READ_ONLY_SETTING.get( + lucenePersistedState.getLastAcceptedState().metadata().index("test-index1").getSettings() + ), + equalTo(true) + ); + } finally { + IOUtils.close(gateway); + } + } + + private MockGatewayMetaState newGatewayForRemoteState( + RemoteClusterStateService remoteClusterStateService, + RemoteStoreRestoreService remoteStoreRestoreService, + PersistedStateRegistry persistedStateRegistry, + ClusterState currentState, + boolean prepareFullState + ) throws IOException { + MockGatewayMetaState gateway = new MockGatewayMetaState(localNode, bigArrays, prepareFullState); + String randomRepoName = "randomRepoName"; + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + randomRepoName + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + randomRepoName + ); + Settings settingWithRemoteStateEnabled = Settings.builder() + .put(settings) + .put("node.attr." 
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, randomRepoName) + .put(stateRepoTypeAttributeKey, FsRepository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build(); + final TransportService transportService = mock(TransportService.class); + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getClusterSettings()).thenReturn( + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + when(transportService.getLocalNode()).thenReturn(mock(DiscoveryNode.class)); + final PersistedClusterStateService persistedClusterStateService = new PersistedClusterStateService( + nodeEnvironment, + xContentRegistry(), + getBigArrays(), + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + () -> 0L + ); + if (!ClusterState.EMPTY_STATE.equals(currentState)) { + Writer writer = persistedClusterStateService.createWriter(); + writer.writeFullStateAndCommit(currentState.term(), currentState); + writer.close(); + } + final MetaStateService metaStateService = mock(MetaStateService.class); + when(metaStateService.loadFullState()).thenReturn(new Tuple<>(Manifest.empty(), ClusterState.EMPTY_STATE.metadata())); + gateway.start( + settingWithRemoteStateEnabled, + transportService, + clusterService, + metaStateService, + null, + null, + persistedClusterStateService, + remoteClusterStateService, + persistedStateRegistry, + remoteStoreRestoreService + ); + return gateway; + } + private static BigArrays getBigArrays() { return usually() ? BigArrays.NON_RECYCLING_INSTANCE diff --git a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java new file mode 100644 index 0000000000000..6c9a3201656d7 --- /dev/null +++ b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java @@ -0,0 +1,281 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.Version; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; +import org.opensearch.test.EqualsHashCodeTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class ClusterMetadataManifestTests extends OpenSearchTestCase { + + public void testClusterMetadataManifestXContentV0() throws IOException { + UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path"); + ClusterMetadataManifest originalManifest = new ClusterMetadataManifest( + 1L, + 1L, + "test-cluster-uuid", + "test-state-uuid", + Version.CURRENT, + "test-node-id", + false, + ClusterMetadataManifest.CODEC_V0, + null, + Collections.singletonList(uploadedIndexMetadata), + "prev-cluster-uuid", + true + ); + final XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + originalManifest.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + final ClusterMetadataManifest fromXContentManifest = ClusterMetadataManifest.fromXContentV0(parser); + assertEquals(originalManifest, fromXContentManifest); + } + } + + public void testClusterMetadataManifestXContent() throws IOException { + UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path"); + ClusterMetadataManifest originalManifest = new ClusterMetadataManifest( + 1L, + 1L, + "test-cluster-uuid", + "test-state-uuid", + Version.CURRENT, + "test-node-id", + false, + ClusterMetadataManifest.CODEC_V1, + "test-global-metadata-file", + Collections.singletonList(uploadedIndexMetadata), + "prev-cluster-uuid", + true + ); + final XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + originalManifest.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + final ClusterMetadataManifest fromXContentManifest = ClusterMetadataManifest.fromXContent(parser); + assertEquals(originalManifest, fromXContentManifest); + } + } + + public void testClusterMetadataManifestSerializationEqualsHashCode() { + ClusterMetadataManifest initialManifest = new ClusterMetadataManifest( + 1337L, + 7L, + "HrYF3kP5SmSPWtKlWhnNSA", + "6By9p9G0Rv2MmFYJcPAOgA", + Version.CURRENT, + "B10RX1f5RJenMQvYccCgSQ", + true, + 1, + "test-global-metadata-file", + randomUploadedIndexMetadataList(), + "yfObdx8KSMKKrXf8UyHhM", + true + ); + { // Mutate Cluster Term + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.clusterTerm(1338L); + return builder.build(); + } + ); + } + { // Mutate State 
Version + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.stateVersion(8L); + return builder.build(); + } + ); + } + { // Mutate Cluster UUID + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.clusterUUID("efOkMiPbQZCUQQgtFWdbPw"); + return builder.build(); + } + ); + } + { // Mutate State UUID + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.stateUUID("efOkMiPbQZCUQQgtFWdbPw"); + return builder.build(); + } + ); + } + { // Mutate OpenSearch Version + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.opensearchVersion(Version.V_EMPTY); + return builder.build(); + } + ); + } + { // Mutate Committed State + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.committed(false); + return builder.build(); + } + ); + } + { // Mutate Indices + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.indices(randomUploadedIndexMetadataList()); + return builder.build(); + } + ); + } + { // Mutate Previous cluster UUID + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.previousClusterUUID("vZX62DCQEOzGXlxXCrEu"); + return builder.build(); + } + ); + + } + { // Mutate cluster uuid committed + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + initialManifest, + orig -> OpenSearchTestCase.copyWriteable( + orig, + new NamedWriteableRegistry(Collections.emptyList()), + ClusterMetadataManifest::new + ), + manifest -> { + ClusterMetadataManifest.Builder builder = ClusterMetadataManifest.builder(manifest); + builder.clusterUUIDCommitted(false); + return builder.build(); + } + ); + } + } + + private List randomUploadedIndexMetadataList() { + final int size = randomIntBetween(1, 10); + final List uploadedIndexMetadataList = new 
ArrayList<>(size); + while (uploadedIndexMetadataList.size() < size) { + assertTrue(uploadedIndexMetadataList.add(randomUploadedIndexMetadata())); + } + return uploadedIndexMetadataList; + } + + private UploadedIndexMetadata randomUploadedIndexMetadata() { + return new UploadedIndexMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + + public void testUploadedIndexMetadataSerializationEqualsHashCode() { + UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path"); + EqualsHashCodeTestUtils.checkEqualsAndHashCode( + uploadedIndexMetadata, + orig -> OpenSearchTestCase.copyWriteable(orig, new NamedWriteableRegistry(Collections.emptyList()), UploadedIndexMetadata::new), + metadata -> randomlyChangingUploadedIndexMetadata(uploadedIndexMetadata) + ); + } + + private UploadedIndexMetadata randomlyChangingUploadedIndexMetadata(UploadedIndexMetadata uploadedIndexMetadata) { + switch (randomInt(2)) { + case 0: + return new UploadedIndexMetadata( + randomAlphaOfLength(10), + uploadedIndexMetadata.getIndexUUID(), + uploadedIndexMetadata.getUploadedFilename() + ); + case 1: + return new UploadedIndexMetadata( + uploadedIndexMetadata.getIndexName(), + randomAlphaOfLength(10), + uploadedIndexMetadata.getUploadedFilename() + ); + case 2: + return new UploadedIndexMetadata( + uploadedIndexMetadata.getIndexName(), + uploadedIndexMetadata.getIndexUUID(), + randomAlphaOfLength(10) + ); + } + return uploadedIndexMetadata; + } +} diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java new file mode 100644 index 0000000000000..65477051cdb30 --- /dev/null +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -0,0 +1,1540 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterModule; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.metadata.IndexGraveyard; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.blobstore.transfer.RemoteTransferContainer; +import org.opensearch.common.compress.DeflateCompressor; +import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.common.network.NetworkModule; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.index.Index; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.indices.IndicesModule; +import org.opensearch.repositories.FilterRepository; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.VersionUtils; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; + +import java.io.ByteArrayInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Stream; + +import org.mockito.ArgumentCaptor; +import org.mockito.ArgumentMatchers; + +import static java.util.stream.Collectors.toList; +import static org.opensearch.gateway.remote.RemoteClusterStateService.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateService.FORMAT_PARAMS; +import static org.opensearch.gateway.remote.RemoteClusterStateService.INDEX_METADATA_CURRENT_CODEC_VERSION; +import static org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_CURRENT_CODEC_VERSION; +import static 
org.opensearch.gateway.remote.RemoteClusterStateService.MANIFEST_FILE_PREFIX; +import static org.opensearch.gateway.remote.RemoteClusterStateService.METADATA_FILE_PREFIX; +import static org.opensearch.gateway.remote.RemoteClusterStateService.RETAINED_MANIFESTS; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class RemoteClusterStateServiceTests extends OpenSearchTestCase { + + private RemoteClusterStateService remoteClusterStateService; + private ClusterSettings clusterSettings; + private Supplier repositoriesServiceSupplier; + private RepositoriesService repositoriesService; + private BlobStoreRepository blobStoreRepository; + private BlobStore blobStore; + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @Before + public void setup() { + repositoriesServiceSupplier = mock(Supplier.class); + repositoriesService = mock(RepositoriesService.class); + when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService); + + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + "remote_store_repository" + ); + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + "remote_store_repository" + ); + + Settings settings = Settings.builder() + .put("node.attr." 
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, "remote_store_repository") + .put(stateRepoTypeAttributeKey, FsRepository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build(); + + clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + NamedXContentRegistry xContentRegistry = new NamedXContentRegistry( + Stream.of( + NetworkModule.getNamedXContents().stream(), + IndicesModule.getNamedXContents().stream(), + ClusterModule.getNamedXWriteables().stream() + ).flatMap(Function.identity()).collect(toList()) + ); + + blobStoreRepository = mock(BlobStoreRepository.class); + blobStore = mock(BlobStore.class); + when(blobStoreRepository.blobStore()).thenReturn(blobStore); + when(repositoriesService.repository("remote_store_repository")).thenReturn(blobStoreRepository); + when(blobStoreRepository.getNamedXContentRegistry()).thenReturn(xContentRegistry); + remoteClusterStateService = new RemoteClusterStateService( + "test-node-id", + repositoriesServiceSupplier, + settings, + clusterSettings, + () -> 0L, + threadPool + ); + } + + @After + public void teardown() throws Exception { + super.tearDown(); + remoteClusterStateService.close(); + threadPool.shutdown(); + } + + public void testFailWriteFullMetadataNonClusterManagerNode() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().build(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)); + Assert.assertThat(manifest, nullValue()); + } + + public void testFailInitializationWhenRemoteStateDisabled() { + final Settings settings = Settings.builder().build(); + assertThrows( + AssertionError.class, + () -> new RemoteClusterStateService( + "test-node-id", + repositoriesServiceSupplier, + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + () -> 0L, + threadPool + ) + ); + } + + public void testFailInitializeWhenRepositoryNotSet() { + doThrow(new RepositoryMissingException("repository missing")).when(repositoriesService).repository("remote_store_repository"); + assertThrows(RepositoryMissingException.class, () -> remoteClusterStateService.start()); + } + + public void testFailWriteFullMetadataWhenNotBlobRepository() { + final FilterRepository filterRepository = mock(FilterRepository.class); + when(repositoriesService.repository("remote_store_repository")).thenReturn(filterRepository); + assertThrows(AssertionError.class, () -> remoteClusterStateService.start()); + } + + public void testWriteFullMetadataSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + mockBlobStoreObjects(); + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid"); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + List indices = List.of(uploadedIndexMetadata); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(indices) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") + .build(); + + assertThat(manifest.getIndices().size(), is(1)); + 
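// Note: the uploaded filename embeds a timestamp, which is why the assertion below checks only
// notNullValue() rather than an exact name. A compact helper for the per-field manifest
// comparisons repeated throughout this class could look like the following sketch
// (hypothetical; not part of this change):
//
//     private static void assertManifestFieldsMatch(ClusterMetadataManifest actual, ClusterMetadataManifest expected) {
//         assertEquals(expected.getClusterTerm(), actual.getClusterTerm());
//         assertEquals(expected.getStateVersion(), actual.getStateVersion());
//         assertEquals(expected.getClusterUUID(), actual.getClusterUUID());
//         assertEquals(expected.getStateUUID(), actual.getStateUUID());
//         assertEquals(expected.getPreviousClusterUUID(), actual.getPreviousClusterUUID());
//     }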
assertThat(manifest.getIndices().get(0).getIndexName(), is(uploadedIndexMetadata.getIndexName())); + assertThat(manifest.getIndices().get(0).getIndexUUID(), is(uploadedIndexMetadata.getIndexUUID())); + assertThat(manifest.getIndices().get(0).getUploadedFilename(), notNullValue()); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + assertThat(manifest.getPreviousClusterUUID(), is(expectedManifest.getPreviousClusterUUID())); + } + + public void testWriteFullMetadataInParallelSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); + + ArgumentCaptor<ActionListener<Void>> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + ArgumentCaptor<WriteContext> writeContextArgumentCaptor = ArgumentCaptor.forClass(WriteContext.class); + AtomicReference<WriteContext> capturedWriteContext = new AtomicReference<>(); + doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onResponse(null); + return null; + }).doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onResponse(null); + capturedWriteContext.set(writeContextArgumentCaptor.getValue()); + return null; + }).doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onResponse(null); + return null; + }).when(container).asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); + + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid"); + + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + List<UploadedIndexMetadata> indices = List.of(uploadedIndexMetadata); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(indices) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") + .build(); + + assertThat(manifest.getIndices().size(), is(1)); + assertThat(manifest.getIndices().get(0).getIndexName(), is(uploadedIndexMetadata.getIndexName())); + assertThat(manifest.getIndices().get(0).getIndexUUID(), is(uploadedIndexMetadata.getIndexUUID())); + assertThat(manifest.getIndices().get(0).getUploadedFilename(), notNullValue()); + assertThat(manifest.getGlobalMetadataFileName(), notNullValue()); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + assertThat(manifest.getPreviousClusterUUID(), is(expectedManifest.getPreviousClusterUUID())); + + assertEquals(actionListenerArgumentCaptor.getAllValues().size(), 3); + assertEquals(writeContextArgumentCaptor.getAllValues().size(), 3); + + byte[] writtenBytes = capturedWriteContext.get() + .getStreamProvider(Integer.MAX_VALUE) + .provideStream(0) + .getInputStream() + .readAllBytes(); + IndexMetadata writtenIndexMetadata =
RemoteClusterStateService.INDEX_METADATA_FORMAT.deserialize( + capturedWriteContext.get().getFileName(), + blobStoreRepository.getNamedXContentRegistry(), + new BytesArray(writtenBytes) + ); + + assertEquals(capturedWriteContext.get().getWritePriority(), WritePriority.URGENT); + assertEquals(writtenIndexMetadata.getNumberOfShards(), 1); + assertEquals(writtenIndexMetadata.getNumberOfReplicas(), 0); + assertEquals(writtenIndexMetadata.getIndex().getName(), "test-index"); + assertEquals(writtenIndexMetadata.getIndex().getUUID(), "index-uuid"); + long expectedChecksum = RemoteTransferContainer.checksumOfChecksum(new ByteArrayIndexInput("metadata-filename", writtenBytes), 8); + if (capturedWriteContext.get().doRemoteDataIntegrityCheck()) { + assertEquals(capturedWriteContext.get().getExpectedChecksum().longValue(), expectedChecksum); + } else { + assertEquals(capturedWriteContext.get().getExpectedChecksum(), null); + } + + } + + public void testWriteFullMetadataFailureForGlobalMetadata() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); + + ArgumentCaptor<ActionListener<Void>> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + + doAnswer((i) -> { + // For an async write, the action listener is called from a different thread; replicate that behaviour here. + new Thread(new Runnable() { + @Override + public void run() { + actionListenerArgumentCaptor.getValue().onFailure(new RuntimeException("Cannot upload to remote")); + } + }).start(); + return null; + }).when(container).asyncBlobUpload(any(WriteContext.class), actionListenerArgumentCaptor.capture()); + + remoteClusterStateService.start(); + assertThrows( + RemoteClusterStateService.RemoteStateTransferException.class, + () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) + ); + } + + public void testTimeoutWhileWritingManifestFile() throws IOException { + // verify the metadata manifest upload timeout + int metadataManifestUploadTimeout = 2; + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.metadata_manifest.upload_timeout", metadataManifestUploadTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); + + ArgumentCaptor<ActionListener<Void>> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + + doAnswer((i) -> { // For Global Metadata + actionListenerArgumentCaptor.getValue().onResponse(null); + return null; + }).doAnswer((i) -> { // For Index Metadata + actionListenerArgumentCaptor.getValue().onResponse(null); + return null; + }).doAnswer((i) -> { + // For the manifest file, perform a no-op so the latch in the code times out + return null; + }).when(container).asyncBlobUpload(any(WriteContext.class), actionListenerArgumentCaptor.capture()); + + remoteClusterStateService.start(); + try { + remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)); + } catch (Exception e) { + assertTrue(e instanceof RemoteClusterStateService.RemoteStateTransferException); + assertTrue(e.getMessage().contains("Timed out waiting for transfer of manifest file to complete")); +
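// The third asyncBlobUpload stub above never invokes its listener, so the only way the
// write can finish is by timing out. A rough sketch of the latch-based wait being
// exercised, assuming a CountDownLatch keyed to the upload listener (illustrative; the
// service's real internals may differ):
//
//     CountDownLatch latch = new CountDownLatch(1); // counted down by the upload listener
//     if (latch.await(metadataManifestUploadTimeout, TimeUnit.SECONDS) == false) {
//         throw new RemoteClusterStateService.RemoteStateTransferException(
//             "Timed out waiting for transfer of manifest file to complete"
//         );
//     }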
} + } + + public void testWriteFullMetadataInParallelFailureForIndexMetadata() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); + + ArgumentCaptor<ActionListener<Void>> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + + doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onResponse(null); + return null; + }).doAnswer((i) -> { + actionListenerArgumentCaptor.getValue().onFailure(new RuntimeException("Cannot upload to remote")); + return null; + }).when(container).asyncBlobUpload(any(WriteContext.class), actionListenerArgumentCaptor.capture()); + + remoteClusterStateService.start(); + assertThrows( + RemoteClusterStateService.RemoteStateTransferException.class, + () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) + ); + assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); + } + + public void testFailWriteIncrementalMetadataNonClusterManagerNode() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().build(); + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeIncrementalMetadata(clusterState, clusterState, null); + Assert.assertThat(manifest, nullValue()); + assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); + } + + public void testFailWriteIncrementalMetadataWhenTermChanged() { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(2L).build(); + final ClusterState previousClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .build(); + assertThrows( + AssertionError.class, + () -> remoteClusterStateService.writeIncrementalMetadata(previousClusterState, clusterState, null) + ); + } + + public void testWriteIncrementalMetadataSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState previousClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .build(); + + final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder().indices(Collections.emptyList()).build(); + + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeIncrementalMetadata( + previousClusterState, + clusterState, + previousManifest + ); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + final List<UploadedIndexMetadata> indices = List.of(uploadedIndexMetadata); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(indices) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") + .build(); + + assertThat(manifest.getIndices().size(), is(1)); + assertThat(manifest.getIndices().get(0).getIndexName(),
is(uploadedIndexMetadata.getIndexName())); + assertThat(manifest.getIndices().get(0).getIndexUUID(), is(uploadedIndexMetadata.getIndexUUID())); + assertThat(manifest.getIndices().get(0).getUploadedFilename(), notNullValue()); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + } + + /* + * Here we will verify the migration of manifest file from codec V0 and V1. + * + * Initially codec version is 0 and global metadata is also null, we will perform index metadata update. + * In final manifest codec version should be 1 and + * global metadata should be updated, even if it was not changed in this cluster state update + */ + public void testMigrationFromCodecV0ManifestToCodecV1Manifest() throws IOException { + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState previousClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .nodes(nodesWithLocalNodeClusterManager()) + .build(); + + // Update only index metadata + final IndexMetadata indexMetadata = new IndexMetadata.Builder("test").settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, "uuid") + .build() + ).numberOfShards(1).numberOfReplicas(0).build(); + Metadata newMetadata = Metadata.builder(previousClusterState.metadata()).put(indexMetadata, true).build(); + ClusterState newClusterState = ClusterState.builder(previousClusterState).metadata(newMetadata).build(); + + // previous manifest with codec 0 and null global metadata + final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder() + .codecVersion(ClusterMetadataManifest.CODEC_V0) + .globalMetadataFileName(null) + .indices(Collections.emptyList()) + .build(); + + remoteClusterStateService.start(); + final ClusterMetadataManifest manifestAfterUpdate = remoteClusterStateService.writeIncrementalMetadata( + previousClusterState, + newClusterState, + previousManifest + ); + + // global metadata is updated + assertThat(manifestAfterUpdate.getGlobalMetadataFileName(), notNullValue()); + // Manifest file with codec version with 1 is updated. 
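// Put differently, an incremental write on top of a CODEC_V0 manifest is expected to both
// attach a global metadata file and bump the codec version, roughly (illustrative):
//
//     assert manifestAfterUpdate.getCodecVersion() > previousManifest.getCodecVersion();
//     assert manifestAfterUpdate.getGlobalMetadataFileName() != null;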
+ assertThat(manifestAfterUpdate.getCodecVersion(), is(ClusterMetadataManifest.CODEC_V1)); + } + + public void testWriteIncrementalGlobalMetadataSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState previousClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .build(); + + final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder() + .codecVersion(2) + .globalMetadataFileName("global-metadata-file") + .indices(Collections.emptyList()) + .build(); + + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeIncrementalMetadata( + previousClusterState, + clusterState, + previousManifest + ); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(Collections.emptyList()) + .globalMetadataFileName("mock-filename") + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .previousClusterUUID("prev-cluster-uuid") + .build(); + + assertThat(manifest.getGlobalMetadataFileName(), notNullValue()); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + } + + /* + * Here we will verify index metadata is not uploaded again if change is only in global metadata + */ + public void testGlobalMetadataOnlyUpdated() throws IOException { + // setup + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState initialClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata).version(randomNonNegativeLong())) + .build(); + final ClusterMetadataManifest initialManifest = ClusterMetadataManifest.builder() + .codecVersion(2) + .globalMetadataFileName("global-metadata-file") + .indices(Collections.emptyList()) + .build(); + remoteClusterStateService.start(); + + // Initial cluster state with index. 
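// The flow below performs two incremental writes: the first changes index metadata, the
// second changes only global metadata. Because uploaded filenames embed a timestamp, an
// unchanged index filename across the two manifests proves the index blob was not
// re-uploaded; with m1/m2 standing for the two manifests (hypothetical names), the key
// property is:
//
//     assert m2.getIndices().get(0).getUploadedFilename().equals(m1.getIndices().get(0).getUploadedFilename());
//     assert m2.getGlobalMetadataFileName().equals(m1.getGlobalMetadataFileName()) == false;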
+ final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + // Updating remote cluster state with changing index metadata + final ClusterMetadataManifest manifestAfterIndexMetadataUpdate = remoteClusterStateService.writeIncrementalMetadata( + initialClusterState, + clusterState, + initialManifest + ); + + // new cluster state where only global metadata is different + Metadata newMetadata = Metadata.builder(clusterState.metadata()) + .persistentSettings(Settings.builder().put("cluster.blocks.read_only", true).build()) + .version(randomNonNegativeLong()) + .build(); + ClusterState newClusterState = ClusterState.builder(clusterState).metadata(newMetadata).build(); + + // updating remote cluster state with global metadata + final ClusterMetadataManifest manifestAfterGlobalMetadataUpdate = remoteClusterStateService.writeIncrementalMetadata( + clusterState, + newClusterState, + manifestAfterIndexMetadataUpdate + ); + + // Verify that index metadata information is same in manifest files + assertThat(manifestAfterIndexMetadataUpdate.getIndices().size(), is(manifestAfterGlobalMetadataUpdate.getIndices().size())); + assertThat( + manifestAfterIndexMetadataUpdate.getIndices().get(0).getIndexName(), + is(manifestAfterGlobalMetadataUpdate.getIndices().get(0).getIndexName()) + ); + assertThat( + manifestAfterIndexMetadataUpdate.getIndices().get(0).getIndexUUID(), + is(manifestAfterGlobalMetadataUpdate.getIndices().get(0).getIndexUUID()) + ); + + // since timestamp is part of file name, if file name is same we can confirm that file is not update in global metadata update + assertThat( + manifestAfterIndexMetadataUpdate.getIndices().get(0).getUploadedFilename(), + is(manifestAfterGlobalMetadataUpdate.getIndices().get(0).getUploadedFilename()) + ); + + // global metadata file would have changed + assertFalse( + manifestAfterIndexMetadataUpdate.getGlobalMetadataFileName() + .equalsIgnoreCase(manifestAfterGlobalMetadataUpdate.getGlobalMetadataFileName()) + ); + } + + /* + * Here we will verify global metadata is not uploaded again if change is only in index metadata + */ + public void testIndexMetadataOnlyUpdated() throws IOException { + // setup + mockBlobStoreObjects(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + final ClusterState initialClusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(coordinationMetadata)) + .build(); + final ClusterMetadataManifest initialManifest = ClusterMetadataManifest.builder() + .codecVersion(2) + .indices(Collections.emptyList()) + .build(); + remoteClusterStateService.start(); + + // Initial cluster state with global metadata. 
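// Mirror of the previous test: here the second write changes only index metadata, so the
// global metadata filename must carry over unchanged while the index list grows from zero
// to one entry; with m1/m2 as the two manifests (hypothetical names):
//
//     assert m2.getGlobalMetadataFileName().equals(m1.getGlobalMetadataFileName());
//     assert m1.getIndices().isEmpty() && m2.getIndices().size() == 1;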
+ final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); + + // Updating remote cluster state with changing global metadata + final ClusterMetadataManifest manifestAfterGlobalMetadataUpdate = remoteClusterStateService.writeIncrementalMetadata( + initialClusterState, + clusterState, + initialManifest + ); + + // new cluster state where only Index metadata is different + final IndexMetadata indexMetadata = new IndexMetadata.Builder("test").settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, "uuid") + .build() + ).numberOfShards(1).numberOfReplicas(0).build(); + Metadata newMetadata = Metadata.builder(clusterState.metadata()).put(indexMetadata, true).build(); + ClusterState newClusterState = ClusterState.builder(clusterState).metadata(newMetadata).build(); + + // updating remote cluster state with index metadata + final ClusterMetadataManifest manifestAfterIndexMetadataUpdate = remoteClusterStateService.writeIncrementalMetadata( + clusterState, + newClusterState, + manifestAfterGlobalMetadataUpdate + ); + + // Verify that global metadata information is same in manifest files after updating index Metadata + // since timestamp is part of file name, if file name is same we can confirm that file is not update in index metadata update + assertThat( + manifestAfterIndexMetadataUpdate.getGlobalMetadataFileName(), + is(manifestAfterGlobalMetadataUpdate.getGlobalMetadataFileName()) + ); + + // Index metadata would have changed + assertThat(manifestAfterGlobalMetadataUpdate.getIndices().size(), is(0)); + assertThat(manifestAfterIndexMetadataUpdate.getIndices().size(), is(1)); + } + + public void testReadLatestMetadataManifestFailedIOException() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + + BlobContainer blobContainer = mockBlobStoreObjects(); + when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) + .thenThrow(IOException.class); + + remoteClusterStateService.start(); + Exception e = assertThrows( + IllegalStateException.class, + () -> remoteClusterStateService.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ) + ); + assertEquals(e.getMessage(), "Error while fetching latest manifest file for remote cluster state"); + } + + public void testReadLatestMetadataManifestFailedNoManifestFileInRemote() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + + BlobContainer blobContainer = mockBlobStoreObjects(); + when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) + .thenReturn(List.of()); + + remoteClusterStateService.start(); + Optional manifest = remoteClusterStateService.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); + assertEquals(manifest, Optional.empty()); + } + + public void testReadLatestMetadataManifestFailedManifestFileRemoveAfterFetchInRemote() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + + BlobContainer blobContainer = mockBlobStoreObjects(); + BlobMetadata blobMetadata = new 
PlainBlobMetadata("manifestFileName", 1); + when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) + .thenReturn(Arrays.asList(blobMetadata)); + when(blobContainer.readBlob("manifestFileName")).thenThrow(FileNotFoundException.class); + + remoteClusterStateService.start(); + Exception e = assertThrows( + IllegalStateException.class, + () -> remoteClusterStateService.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ) + ); + assertEquals(e.getMessage(), "Error while downloading cluster metadata - manifestFileName"); + } + + public void testReadLatestMetadataManifestSuccessButNoIndexMetadata() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(List.of()) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") + .codecVersion(ClusterMetadataManifest.CODEC_V0) + .build(); + + BlobContainer blobContainer = mockBlobStoreObjects(); + mockBlobContainer(blobContainer, expectedManifest, Map.of()); + + remoteClusterStateService.start(); + assertEquals( + remoteClusterStateService.getLatestClusterState(clusterState.getClusterName().value(), clusterState.metadata().clusterUUID()) + .getMetadata() + .getIndices() + .size(), + 0 + ); + } + + public void testReadLatestMetadataManifestSuccessButIndexMetadataFetchIOException() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + final List indices = List.of(uploadedIndexMetadata); + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(indices) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") + .build(); + + BlobContainer blobContainer = mockBlobStoreObjects(); + mockBlobContainer(blobContainer, expectedManifest, Map.of()); + when(blobContainer.readBlob(uploadedIndexMetadata.getUploadedFilename() + ".dat")).thenThrow(FileNotFoundException.class); + + remoteClusterStateService.start(); + Exception e = assertThrows( + IllegalStateException.class, + () -> remoteClusterStateService.getLatestClusterState( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ).getMetadata().getIndices() + ); + assertEquals(e.getMessage(), "Error while downloading IndexMetadata - " + uploadedIndexMetadata.getUploadedFilename()); + } + + public void testReadLatestMetadataManifestSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + final List indices = List.of(uploadedIndexMetadata); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(indices) + 
.clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .codecVersion(ClusterMetadataManifest.CODEC_V0) + .previousClusterUUID("prev-cluster-uuid") + .build(); + + mockBlobContainer(mockBlobStoreObjects(), expectedManifest, new HashMap<>()); + remoteClusterStateService.start(); + final ClusterMetadataManifest manifest = remoteClusterStateService.getLatestClusterMetadataManifest( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ).get(); + + assertThat(manifest.getIndices().size(), is(1)); + assertThat(manifest.getIndices().get(0).getIndexName(), is(uploadedIndexMetadata.getIndexName())); + assertThat(manifest.getIndices().get(0).getIndexUUID(), is(uploadedIndexMetadata.getIndexUUID())); + assertThat(manifest.getIndices().get(0).getUploadedFilename(), notNullValue()); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + } + + public void testReadGlobalMetadata() throws IOException { + when(blobStoreRepository.getNamedXContentRegistry()).thenReturn(new NamedXContentRegistry( + List.of(new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(IndexGraveyard.TYPE), IndexGraveyard::fromXContent)))); + final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); + remoteClusterStateService.start(); + + long prevClusterStateVersion = 13L; + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(List.of()) + .clusterTerm(1L) + .stateVersion(prevClusterStateVersion) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .codecVersion(MANIFEST_CURRENT_CODEC_VERSION) + .globalMetadataFileName("global-metadata-file") + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") + .build(); + + Metadata expactedMetadata = Metadata.builder().persistentSettings(Settings.builder().put("readonly", true).build()).build(); + mockBlobContainerForGlobalMetadata(mockBlobStoreObjects(), expectedManifest, expactedMetadata); + + ClusterState newClusterState = remoteClusterStateService.getLatestClusterState( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ); + + assertTrue(Metadata.isGlobalStateEquals(newClusterState.getMetadata(), expactedMetadata)); + + long newClusterStateVersion = newClusterState.getVersion(); + assert prevClusterStateVersion == newClusterStateVersion : String.format( + Locale.ROOT, + "ClusterState version is not restored. 
previousClusterVersion: [%s] is not equal to current [%s]", + prevClusterStateVersion, + newClusterStateVersion + ); + } + + public void testReadGlobalMetadataIOException() throws IOException { + final ClusterState clusterState = generateClusterStateWithGlobalMetadata().nodes(nodesWithLocalNodeClusterManager()).build(); + remoteClusterStateService.start(); + String globalIndexMetadataName = "global-metadata-file"; + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(List.of()) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .codecVersion(MANIFEST_CURRENT_CODEC_VERSION) + .globalMetadataFileName(globalIndexMetadataName) + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") + .build(); + + Metadata expactedMetadata = Metadata.builder().persistentSettings(Settings.builder().put("readonly", true).build()).build(); + + BlobContainer blobContainer = mockBlobStoreObjects(); + mockBlobContainerForGlobalMetadata(blobContainer, expectedManifest, expactedMetadata); + + when(blobContainer.readBlob(RemoteClusterStateService.GLOBAL_METADATA_FORMAT.blobName(globalIndexMetadataName))).thenThrow( + FileNotFoundException.class + ); + + remoteClusterStateService.start(); + Exception e = assertThrows( + IllegalStateException.class, + () -> remoteClusterStateService.getLatestClusterState( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ) + ); + assertEquals(e.getMessage(), "Error while downloading Global Metadata - " + globalIndexMetadataName); + } + + public void testReadLatestIndexMetadataSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + remoteClusterStateService.start(); + + final Index index = new Index("test-index", "index-uuid"); + String fileName = "metadata-" + index.getUUID(); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata(index.getName(), index.getUUID(), fileName); + final Settings idxSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build(); + final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) + .numberOfShards(11) + .numberOfReplicas(10) + .build(); + + final List indices = List.of(uploadedIndexMetadata); + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(indices) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID("prev-cluster-uuid") + .codecVersion(ClusterMetadataManifest.CODEC_V0) + .build(); + + mockBlobContainer(mockBlobStoreObjects(), expectedManifest, Map.of(index.getUUID(), indexMetadata)); + + Map indexMetadataMap = remoteClusterStateService.getLatestClusterState( + clusterState.getClusterName().value(), + clusterState.metadata().clusterUUID() + ).getMetadata().getIndices(); + + assertEquals(indexMetadataMap.size(), 1); + assertEquals(indexMetadataMap.get(index.getName()).getIndex().getName(), index.getName()); + assertEquals(indexMetadataMap.get(index.getName()).getNumberOfShards(), indexMetadata.getNumberOfShards()); + assertEquals(indexMetadataMap.get(index.getName()).getNumberOfReplicas(), 
indexMetadata.getNumberOfReplicas()); + } + + public void testMarkLastStateAsCommittedSuccess() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + mockBlobStoreObjects(); + remoteClusterStateService.start(); + final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); + List<UploadedIndexMetadata> indices = List.of(uploadedIndexMetadata); + final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder().indices(indices).build(); + + final ClusterMetadataManifest manifest = remoteClusterStateService.markLastStateAsCommitted(clusterState, previousManifest); + + final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() + .indices(indices) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID("state-uuid") + .clusterUUID("cluster-uuid") + .nodeId("nodeA") + .previousClusterUUID("prev-cluster-uuid") + .build(); + + assertThat(manifest.getIndices().size(), is(1)); + assertThat(manifest.getIndices().get(0).getIndexName(), is(uploadedIndexMetadata.getIndexName())); + assertThat(manifest.getIndices().get(0).getIndexUUID(), is(uploadedIndexMetadata.getIndexUUID())); + assertThat(manifest.getIndices().get(0).getUploadedFilename(), notNullValue()); + assertThat(manifest.getClusterTerm(), is(expectedManifest.getClusterTerm())); + assertThat(manifest.getStateVersion(), is(expectedManifest.getStateVersion())); + assertThat(manifest.getClusterUUID(), is(expectedManifest.getClusterUUID())); + assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); + } + + public void testGetValidPreviousClusterUUID() throws IOException { + Map<String, String> clusterUUIDsPointers = Map.of( + "cluster-uuid1", + ClusterState.UNKNOWN_UUID, + "cluster-uuid2", + "cluster-uuid1", + "cluster-uuid3", + "cluster-uuid2" + ); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers); + + remoteClusterStateService.start(); + String previousClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster"); + assertThat(previousClusterUUID, equalTo("cluster-uuid3")); + } + + public void testGetValidPreviousClusterUUIDForInvalidChain() throws IOException { + Map<String, String> clusterUUIDsPointers = Map.of( + "cluster-uuid2", + "cluster-uuid1", + "cluster-uuid3", + "cluster-uuid2", + "cluster-uuid5", + "cluster-uuid4" + ); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers); + + remoteClusterStateService.start(); + assertThrows(IllegalStateException.class, () -> remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")); + } + + public void testGetValidPreviousClusterUUIDWithMultipleChains() throws IOException { + Map<String, String> clusterUUIDsPointers = Map.of( + "cluster-uuid2", + "cluster-uuid1", + "cluster-uuid1", + ClusterState.UNKNOWN_UUID, + "cluster-uuid3", + "cluster-uuid1" + ); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, randomBoolean(), Collections.emptyMap()); + + remoteClusterStateService.start(); + String previousClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster"); + assertThat(previousClusterUUID, equalTo("cluster-uuid3")); + } + + public void testGetValidPreviousClusterUUIDWithInvalidMultipleChains() throws IOException { + Map<String, String> clusterUUIDsPointers = Map.of( + "cluster-uuid1", + ClusterState.UNKNOWN_UUID, + "cluster-uuid2", + "cluster-uuid1", + "cluster-uuid3", + ClusterState.UNKNOWN_UUID + ); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers); + + 
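+ // note: cluster-uuid1 and cluster-uuid3 both point at UNKNOWN_UUID, leaving two candidate chain heads, so validation below is expected to fail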
remoteClusterStateService.start(); + assertThrows(IllegalStateException.class, () -> remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")); + } + + public void testGetValidPreviousClusterUUIDWhenLastUUIDUncommitted() throws IOException { + Map<String, String> clusterUUIDsPointers = Map.of( + "cluster-uuid1", + ClusterState.UNKNOWN_UUID, + "cluster-uuid2", + "cluster-uuid1", + "cluster-uuid3", + "cluster-uuid2" + ); + Map<String, Boolean> clusterUUIDCommitted = Map.of("cluster-uuid1", true, "cluster-uuid2", true, "cluster-uuid3", false); + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, clusterUUIDCommitted); + + remoteClusterStateService.start(); + String previousClusterUUID = remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster"); + assertThat(previousClusterUUID, equalTo("cluster-uuid2")); + } + + public void testDeleteStaleClusterUUIDs() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + ClusterMetadataManifest clusterMetadataManifest = ClusterMetadataManifest.builder() + .indices(List.of()) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID(randomAlphaOfLength(10)) + .clusterUUID("cluster-uuid1") + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID(ClusterState.UNKNOWN_UUID) + .committed(true) + .build(); + + BlobPath blobPath = new BlobPath().add("random-path"); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + BlobContainer uuidContainerContainer = mock(BlobContainer.class); + BlobContainer manifest2Container = mock(BlobContainer.class); + BlobContainer manifest3Container = mock(BlobContainer.class); + when(blobStore.blobContainer(any())).then(invocation -> { + BlobPath blobPath1 = invocation.getArgument(0); + if (blobPath1.buildAsString().endsWith("cluster-state/")) { + return uuidContainerContainer; + } else if (blobPath1.buildAsString().contains("cluster-state/cluster-uuid2/")) { + return manifest2Container; + } else if (blobPath1.buildAsString().contains("cluster-state/cluster-uuid3/")) { + return manifest3Container; + } else { + throw new IllegalArgumentException("Unexpected blob path " + blobPath1); + } + }); + Map<String, BlobContainer> blobMetadataMap = Map.of( + "cluster-uuid1", + mock(BlobContainer.class), + "cluster-uuid2", + mock(BlobContainer.class), + "cluster-uuid3", + mock(BlobContainer.class) + ); + when(uuidContainerContainer.children()).thenReturn(blobMetadataMap); + when( + manifest2Container.listBlobsByPrefixInSortedOrder( + MANIFEST_FILE_PREFIX + DELIMITER, + Integer.MAX_VALUE, + BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC + ) + ).thenReturn(List.of(new PlainBlobMetadata("manifest2", 1L))); + when( + manifest3Container.listBlobsByPrefixInSortedOrder( + MANIFEST_FILE_PREFIX + DELIMITER, + Integer.MAX_VALUE, + BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC + ) + ).thenReturn(List.of(new PlainBlobMetadata("manifest3", 1L))); + remoteClusterStateService.start(); + remoteClusterStateService.deleteStaleClusterUUIDs(clusterState, clusterMetadataManifest); + try { + assertBusy(() -> { + verify(manifest2Container, times(1)).delete(); + verify(manifest3Container, times(1)).delete(); + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void testRemoteStateStats() throws IOException { + final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); + mockBlobStoreObjects(); + remoteClusterStateService.start(); + final 
ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid"); + + assertTrue(remoteClusterStateService.getStats() != null); + assertEquals(1, remoteClusterStateService.getStats().getSuccessCount()); + assertEquals(0, remoteClusterStateService.getStats().getCleanupAttemptFailedCount()); + assertEquals(0, remoteClusterStateService.getStats().getFailedCount()); + } + + public void testRemoteStateCleanupFailureStats() throws IOException { + BlobContainer blobContainer = mock(BlobContainer.class); + doThrow(IOException.class).when(blobContainer).delete(); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + BlobPath blobPath = new BlobPath().add("random-path"); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + remoteClusterStateService.start(); + remoteClusterStateService.deleteStaleUUIDsClusterMetadata("cluster1", Arrays.asList("cluster-uuid1")); + try { + assertBusy(() -> { + // wait for stats to get updated + assertTrue(remoteClusterStateService.getStats() != null); + assertEquals(0, remoteClusterStateService.getStats().getSuccessCount()); + assertEquals(1, remoteClusterStateService.getStats().getCleanupAttemptFailedCount()); + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void testFileNames() { + final Index index = new Index("test-index", "index-uuid"); + final Settings idxSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build(); + final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + + String indexMetadataFileName = RemoteClusterStateService.indexMetadataFileName(indexMetadata); + String[] splittedIndexMetadataFileName = indexMetadataFileName.split(DELIMITER); + assertThat(indexMetadataFileName.split(DELIMITER).length, is(4)); + assertThat(splittedIndexMetadataFileName[0], is(METADATA_FILE_PREFIX)); + assertThat(splittedIndexMetadataFileName[1], is(RemoteStoreUtils.invertLong(indexMetadata.getVersion()))); + assertThat(splittedIndexMetadataFileName[3], is(String.valueOf(INDEX_METADATA_CURRENT_CODEC_VERSION))); + + int term = randomIntBetween(5, 10); + int version = randomIntBetween(5, 10); + String manifestFileName = RemoteClusterStateService.getManifestFileName(term, version, true); + assertThat(manifestFileName.split(DELIMITER).length, is(6)); + String[] splittedName = manifestFileName.split(DELIMITER); + assertThat(splittedName[0], is(MANIFEST_FILE_PREFIX)); + assertThat(splittedName[1], is(RemoteStoreUtils.invertLong(term))); + assertThat(splittedName[2], is(RemoteStoreUtils.invertLong(version))); + assertThat(splittedName[3], is("C")); + assertThat(splittedName[5], is(String.valueOf(MANIFEST_CURRENT_CODEC_VERSION))); + + manifestFileName = RemoteClusterStateService.getManifestFileName(term, version, false); + splittedName = manifestFileName.split(DELIMITER); + assertThat(splittedName[3], is("P")); + } + + public void testSingleConcurrentExecutionOfStaleManifestCleanup() throws Exception { + BlobContainer blobContainer = mock(BlobContainer.class); + BlobPath blobPath = new BlobPath().add("random-path"); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + + CountDownLatch latch = new CountDownLatch(1); + AtomicInteger callCount = new AtomicInteger(0); + doAnswer(invocation -> { + 
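+ // note: the answer below blocks on the latch, so if the second deleteStaleClusterMetadata call were not skipped, callCount would exceed 1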
callCount.incrementAndGet(); + if (latch.await(5000, TimeUnit.SECONDS) == false) { + throw new Exception("Timed out waiting for delete task queuing to complete"); + } + return null; + }).when(blobContainer) + .listBlobsByPrefixInSortedOrder( + any(String.class), + any(int.class), + any(BlobContainer.BlobNameSortOrder.class), + any(ActionListener.class) + ); + + remoteClusterStateService.start(); + remoteClusterStateService.deleteStaleClusterMetadata("cluster-name", "cluster-uuid", RETAINED_MANIFESTS); + remoteClusterStateService.deleteStaleClusterMetadata("cluster-name", "cluster-uuid", RETAINED_MANIFESTS); + + latch.countDown(); + assertBusy(() -> assertEquals(1, callCount.get())); + } + + public void testIndexMetadataUploadWaitTimeSetting() { + // verify default value + assertEquals( + RemoteClusterStateService.INDEX_METADATA_UPLOAD_TIMEOUT_DEFAULT, + remoteClusterStateService.getIndexMetadataUploadTimeout() + ); + + // verify update index metadata upload timeout + int indexMetadataUploadTimeout = randomIntBetween(1, 10); + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.index_metadata.upload_timeout", indexMetadataUploadTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + assertEquals(indexMetadataUploadTimeout, remoteClusterStateService.getIndexMetadataUploadTimeout().seconds()); + } + + public void testMetadataManifestUploadWaitTimeSetting() { + // verify default value + assertEquals( + RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_DEFAULT, + remoteClusterStateService.getMetadataManifestUploadTimeout() + ); + + // verify update metadata manifest upload timeout + int metadataManifestUploadTimeout = randomIntBetween(1, 10); + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.metadata_manifest.upload_timeout", metadataManifestUploadTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + assertEquals(metadataManifestUploadTimeout, remoteClusterStateService.getMetadataManifestUploadTimeout().seconds()); + } + + public void testGlobalMetadataUploadWaitTimeSetting() { + // verify default value + assertEquals( + RemoteClusterStateService.GLOBAL_METADATA_UPLOAD_TIMEOUT_DEFAULT, + remoteClusterStateService.getGlobalMetadataUploadTimeout() + ); + + // verify update global metadata upload timeout + int globalMetadataUploadTimeout = randomIntBetween(1, 10); + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.global_metadata.upload_timeout", globalMetadataUploadTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + assertEquals(globalMetadataUploadTimeout, remoteClusterStateService.getGlobalMetadataUploadTimeout().seconds()); + } + + private void mockObjectsForGettingPreviousClusterUUID(Map<String, String> clusterUUIDsPointers) throws IOException { + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, false, Collections.emptyMap()); + } + + private void mockObjectsForGettingPreviousClusterUUID( + Map<String, String> clusterUUIDsPointers, + Map<String, Boolean> clusterUUIDCommitted + ) throws IOException { + mockObjectsForGettingPreviousClusterUUID(clusterUUIDsPointers, false, clusterUUIDCommitted); + } + + private void mockObjectsForGettingPreviousClusterUUID( + Map<String, String> clusterUUIDsPointers, + boolean differGlobalMetadata, + Map<String, Boolean> clusterUUIDCommitted + ) throws IOException { + final BlobPath blobPath = mock(BlobPath.class); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + when(blobPath.add(anyString())).thenReturn(blobPath); + 
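+ // the mocked BlobPath returns itself from add(...), so every nested path resolves to the same "/blob/path/" string stubbed below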
when(blobPath.buildAsString()).thenReturn("/blob/path/"); + BlobContainer blobContainer1 = mock(BlobContainer.class); + BlobContainer blobContainer2 = mock(BlobContainer.class); + BlobContainer blobContainer3 = mock(BlobContainer.class); + BlobContainer uuidBlobContainer = mock(BlobContainer.class); + when(blobContainer1.path()).thenReturn(blobPath); + when(blobContainer2.path()).thenReturn(blobPath); + when(blobContainer3.path()).thenReturn(blobPath); + + mockBlobContainerForClusterUUIDs(uuidBlobContainer, clusterUUIDsPointers.keySet()); + List<UploadedIndexMetadata> uploadedIndexMetadataList1 = List.of( + new UploadedIndexMetadata("index1", "index-uuid1", "key1"), + new UploadedIndexMetadata("index2", "index-uuid2", "key2") + ); + final ClusterMetadataManifest clusterManifest1 = generateClusterMetadataManifest( + "cluster-uuid1", + clusterUUIDsPointers.get("cluster-uuid1"), + randomAlphaOfLength(10), + uploadedIndexMetadataList1, + "test-metadata1", + clusterUUIDCommitted.getOrDefault("cluster-uuid1", true) + ); + Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build(); + IndexMetadata indexMetadata1 = IndexMetadata.builder("index1") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + IndexMetadata indexMetadata2 = IndexMetadata.builder("index2") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + Metadata metadata1 = Metadata.builder() + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build(); + Map<String, IndexMetadata> indexMetadataMap1 = Map.of("index-uuid1", indexMetadata1, "index-uuid2", indexMetadata2); + mockBlobContainerForGlobalMetadata(blobContainer1, clusterManifest1, metadata1); + mockBlobContainer(blobContainer1, clusterManifest1, indexMetadataMap1, ClusterMetadataManifest.CODEC_V1); + + List<UploadedIndexMetadata> uploadedIndexMetadataList2 = List.of( + new UploadedIndexMetadata("index1", "index-uuid1", "key1"), + new UploadedIndexMetadata("index2", "index-uuid2", "key2") + ); + final ClusterMetadataManifest clusterManifest2 = generateClusterMetadataManifest( + "cluster-uuid2", + clusterUUIDsPointers.get("cluster-uuid2"), + randomAlphaOfLength(10), + uploadedIndexMetadataList2, + "test-metadata2", + clusterUUIDCommitted.getOrDefault("cluster-uuid2", true) + ); + IndexMetadata indexMetadata3 = IndexMetadata.builder("index1") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + IndexMetadata indexMetadata4 = IndexMetadata.builder("index2") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + Metadata metadata2 = Metadata.builder() + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), true).build()) + .build(); + Map<String, IndexMetadata> indexMetadataMap2 = Map.of("index-uuid1", indexMetadata3, "index-uuid2", indexMetadata4); + mockBlobContainerForGlobalMetadata(blobContainer2, clusterManifest2, metadata2); + mockBlobContainer(blobContainer2, clusterManifest2, indexMetadataMap2, ClusterMetadataManifest.CODEC_V1); + + // differGlobalMetadata controls which one of IndexMetadata or Metadata will be different + // when comparing the cluster-uuid3 and cluster-uuid1 state. + // if set to true, only Metadata will differ between cluster-uuid1 and cluster-uuid3. 
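+ // (with differGlobalMetadata == true, indexMetadataMap3 below is copied from indexMetadataMap1 and only the read_only persistent setting flips)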
+ // If set to false, only IndexMetadata will be different. + // Adding a difference in EXACTLY one of these at random helps us test that our uuid trimming logic compares both + // IndexMetadata and Metadata when deciding whether the remote state between two different cluster uuids is the same. + List<UploadedIndexMetadata> uploadedIndexMetadataList3 = differGlobalMetadata + ? new ArrayList<>(uploadedIndexMetadataList1) + : List.of(new UploadedIndexMetadata("index1", "index-uuid1", "key1")); + IndexMetadata indexMetadata5 = IndexMetadata.builder("index1") + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + Map<String, IndexMetadata> indexMetadataMap3 = differGlobalMetadata + ? new HashMap<>(indexMetadataMap1) + : Map.of("index-uuid1", indexMetadata5); + Metadata metadata3 = Metadata.builder() + .persistentSettings(Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), !differGlobalMetadata).build()) + .build(); + + final ClusterMetadataManifest clusterManifest3 = generateClusterMetadataManifest( + "cluster-uuid3", + clusterUUIDsPointers.get("cluster-uuid3"), + randomAlphaOfLength(10), + uploadedIndexMetadataList3, + "test-metadata3", + clusterUUIDCommitted.getOrDefault("cluster-uuid3", true) + ); + mockBlobContainerForGlobalMetadata(blobContainer3, clusterManifest3, metadata3); + mockBlobContainer(blobContainer3, clusterManifest3, indexMetadataMap3, ClusterMetadataManifest.CODEC_V1); + + ArrayList<BlobContainer> mockBlobContainerOrderedList = new ArrayList<>( + List.of(blobContainer1, blobContainer1, blobContainer3, blobContainer3, blobContainer2, blobContainer2) + ); + + if (differGlobalMetadata) { + mockBlobContainerOrderedList.addAll( + List.of(blobContainer3, blobContainer1, blobContainer3, blobContainer1, blobContainer1, blobContainer3) + ); + } + mockBlobContainerOrderedList.addAll( + List.of(blobContainer2, blobContainer1, blobContainer2, blobContainer1, blobContainer1, blobContainer2) + ); + BlobContainer[] mockBlobContainerOrderedArray = new BlobContainer[mockBlobContainerOrderedList.size()]; + mockBlobContainerOrderedList.toArray(mockBlobContainerOrderedArray); + when(blobStore.blobContainer(ArgumentMatchers.any())).thenReturn(uuidBlobContainer, mockBlobContainerOrderedArray); + when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor()); + } + + private ClusterMetadataManifest generateClusterMetadataManifest( + String clusterUUID, + String previousClusterUUID, + String stateUUID, + List<UploadedIndexMetadata> uploadedIndexMetadata, + String globalMetadataFileName, + Boolean isUUIDCommitted + ) { + return ClusterMetadataManifest.builder() + .indices(uploadedIndexMetadata) + .clusterTerm(1L) + .stateVersion(1L) + .stateUUID(stateUUID) + .clusterUUID(clusterUUID) + .nodeId("nodeA") + .opensearchVersion(VersionUtils.randomOpenSearchVersion(random())) + .previousClusterUUID(previousClusterUUID) + .committed(true) + .clusterUUIDCommitted(isUUIDCommitted) + .globalMetadataFileName(globalMetadataFileName) + .codecVersion(ClusterMetadataManifest.CODEC_V1) + .build(); + } + + private BlobContainer mockBlobStoreObjects() { + return mockBlobStoreObjects(BlobContainer.class); + } + + private BlobContainer mockBlobStoreObjects(Class<? extends BlobContainer> blobContainerClazz) { + final BlobPath blobPath = mock(BlobPath.class); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + when(blobPath.add(anyString())).thenReturn(blobPath); + when(blobPath.buildAsString()).thenReturn("/blob/path/"); + final BlobContainer blobContainer = mock(blobContainerClazz); + when(blobContainer.path()).thenReturn(blobPath); + 
when(blobStore.blobContainer(any())).thenReturn(blobContainer); + when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor()); + return blobContainer; + } + + private void mockBlobContainerForClusterUUIDs(BlobContainer blobContainer, Set<String> clusterUUIDs) throws IOException { + Map<String, BlobContainer> blobContainerMap = new HashMap<>(); + for (String clusterUUID : clusterUUIDs) { + blobContainerMap.put(clusterUUID, mockBlobStoreObjects()); + } + when(blobContainer.children()).thenReturn(blobContainerMap); + } + + private void mockBlobContainer( + BlobContainer blobContainer, + ClusterMetadataManifest clusterMetadataManifest, + Map<String, IndexMetadata> indexMetadataMap + ) throws IOException { + mockBlobContainer(blobContainer, clusterMetadataManifest, indexMetadataMap, ClusterMetadataManifest.CODEC_V0); + } + + private void mockBlobContainer( + BlobContainer blobContainer, + ClusterMetadataManifest clusterMetadataManifest, + Map<String, IndexMetadata> indexMetadataMap, + int codecVersion + ) throws IOException { + String manifestFileName = codecVersion >= ClusterMetadataManifest.CODEC_V1 + ? "manifest__manifestFileName__abcd__abcd__abcd__1" + : "manifestFileName"; + BlobMetadata blobMetadata = new PlainBlobMetadata(manifestFileName, 1); + when(blobContainer.listBlobsByPrefixInSortedOrder("manifest" + DELIMITER, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC)) + .thenReturn(Arrays.asList(blobMetadata)); + + BytesReference bytes = RemoteClusterStateService.CLUSTER_METADATA_MANIFEST_FORMAT.serialize( + clusterMetadataManifest, + manifestFileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + when(blobContainer.readBlob(manifestFileName)).thenReturn(new ByteArrayInputStream(bytes.streamInput().readAllBytes())); + + clusterMetadataManifest.getIndices().forEach(uploadedIndexMetadata -> { + try { + IndexMetadata indexMetadata = indexMetadataMap.get(uploadedIndexMetadata.getIndexUUID()); + if (indexMetadata == null) { + return; + } + String fileName = uploadedIndexMetadata.getUploadedFilename(); + when(blobContainer.readBlob(fileName + ".dat")).thenAnswer((invocationOnMock) -> { + BytesReference bytesIndexMetadata = RemoteClusterStateService.INDEX_METADATA_FORMAT.serialize( + indexMetadata, + fileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytesIndexMetadata.streamInput().readAllBytes()); + }); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + + private void mockBlobContainerForGlobalMetadata( + BlobContainer blobContainer, + ClusterMetadataManifest clusterMetadataManifest, + Metadata metadata + ) throws IOException { + String mockManifestFileName = "manifest__1__2__C__456__1"; + BlobMetadata blobMetadata = new PlainBlobMetadata(mockManifestFileName, 1); + when( + blobContainer.listBlobsByPrefixInSortedOrder( + "manifest" + RemoteClusterStateService.DELIMITER, + 1, + BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC + ) + ).thenReturn(Arrays.asList(blobMetadata)); + + BytesReference bytes = RemoteClusterStateService.CLUSTER_METADATA_MANIFEST_FORMAT.serialize( + clusterMetadataManifest, + mockManifestFileName, + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + when(blobContainer.readBlob(mockManifestFileName)).thenReturn(new ByteArrayInputStream(bytes.streamInput().readAllBytes())); + + String[] splitPath = clusterMetadataManifest.getGlobalMetadataFileName().split("/"); + when(blobContainer.readBlob(RemoteClusterStateService.GLOBAL_METADATA_FORMAT.blobName(splitPath[splitPath.length - 1]))).thenAnswer( + (invocationOnMock) -> { + 
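+ // serialize on every invocation so each read of the blob returns a fresh, unconsumed input stream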
BytesReference bytesGlobalMetadata = RemoteClusterStateService.GLOBAL_METADATA_FORMAT.serialize( + metadata, + "global-metadata-file", + blobStoreRepository.getCompressor(), + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytesGlobalMetadata.streamInput().readAllBytes()); + } + ); + } + + private static ClusterState.Builder generateClusterStateWithGlobalMetadata() { + final Settings clusterSettings = Settings.builder().put("cluster.blocks.read_only", true).build(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + + return ClusterState.builder(ClusterName.DEFAULT) + .version(1L) + .stateUUID("state-uuid") + .metadata( + Metadata.builder() + .persistentSettings(clusterSettings) + .clusterUUID("cluster-uuid") + .coordinationMetadata(coordinationMetadata) + .build() + ); + } + + private static ClusterState.Builder generateClusterStateWithOneIndex() { + final Index index = new Index("test-index", "index-uuid"); + final Settings idxSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build(); + final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build(); + + return ClusterState.builder(ClusterName.DEFAULT) + .version(1L) + .stateUUID("state-uuid") + .metadata( + Metadata.builder() + .version(randomNonNegativeLong()) + .put(indexMetadata, true) + .clusterUUID("cluster-uuid") + .coordinationMetadata(coordinationMetadata) + .build() + ); + } + + private static DiscoveryNodes nodesWithLocalNodeClusterManager() { + return DiscoveryNodes.builder().clusterManagerNodeId("cluster-manager-id").localNodeId("cluster-manager-id").build(); + } + +} diff --git a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java index 7d36795d1a896..c34f13041cb11 100644 --- a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java @@ -52,6 +52,7 @@ import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; import org.opensearch.tasks.Task; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; @@ -173,7 +174,8 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th threadPool, xContentRegistry(), dispatcher, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + NoopTracer.INSTANCE ) { @Override @@ -238,7 +240,8 @@ public void dispatchBadRequest(RestChannel channel, ThreadContext threadContext, channel.sendResponse(emptyResponse(RestStatus.BAD_REQUEST)); } }, - clusterSettings + clusterSettings, + NoopTracer.INSTANCE ) { @Override protected HttpServerChannel bind(InetSocketAddress hostAddress) { diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index 1ee82e83fc78b..cd6d0225d6f44 100644 --- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ 
b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -86,6 +86,7 @@ import org.opensearch.index.fielddata.IndexFieldDataCache; import org.opensearch.index.mapper.ParsedDocument; import org.opensearch.index.mapper.Uid; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexingOperationListener; import org.opensearch.index.shard.SearchOperationListener; @@ -103,11 +104,13 @@ import org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.repositories.RepositoriesService; import org.opensearch.script.ScriptService; import org.opensearch.search.internal.ReaderContext; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; @@ -202,7 +205,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); repositoriesService = new RepositoriesService( settings, @@ -230,7 +234,8 @@ private IndexService newIndexService(IndexModule module) throws IOException { return new RemoteBlobStoreInternalTranslogFactory( repositoriesServiceReference::get, threadPool, - indexSettings.getRemoteStoreTranslogRepository() + indexSettings.getRemoteStoreTranslogRepository(), + new RemoteTranslogTransferTracker(shardRouting.shardId(), 10) ); } return new InternalTranslogFactory(); @@ -253,7 +258,10 @@ private IndexService newIndexService(IndexModule module) throws IOException { () -> false, null, new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), - translogFactorySupplier + translogFactorySupplier, + () -> IndexSettings.DEFAULT_REFRESH_INTERVAL, + () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + DefaultRecoverySettings.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/index/IndexServiceTests.java b/server/src/test/java/org/opensearch/index/IndexServiceTests.java index c62b8e980d321..00c8b52d30c55 100644 --- a/server/src/test/java/org/opensearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/opensearch/index/IndexServiceTests.java @@ -33,7 +33,9 @@ package org.opensearch.index; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; +import org.opensearch.Version; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.compress.CompressedXContent; @@ -526,4 +528,76 @@ public void testUpdateRemoteTranslogBufferIntervalDynamically() { indexMetadata = client().admin().cluster().prepareState().execute().actionGet().getState().metadata().index("test"); assertEquals("20s", indexMetadata.getSettings().get(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey())); } + + public void testIndexSort() { + Settings settings = Settings.builder() + 
.put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "0ms") // disable + .putList("index.sort.field", "sortfield") + .build(); + try { + // Integer index sort should keep the int sort type + IndexService index = createIndex("test", settings, createTestMapping("integer")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.INT); + + // Long index sort should keep the long sort type + index = createIndex("test", settings, createTestMapping("long")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.LONG); + + // Float index sort should keep the float sort type + index = createIndex("test", settings, createTestMapping("float")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.FLOAT); + + // Double index sort should keep the double sort type + index = createIndex("test", settings, createTestMapping("double")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.DOUBLE); + + // String index sort should keep the string sort type + index = createIndex("test", settings, createTestMapping("string")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.STRING); + } catch (IllegalArgumentException ex) { + assertEquals("failed to parse value [0ms] for setting [index.translog.sync_interval], must be >= [100ms]", ex.getMessage()); + } + } + + public void testIndexSortBackwardCompatible() { + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "0ms") // disable + .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.V_2_6_1) + .putList("index.sort.field", "sortfield") + .build(); + try { + // Integer index sort should be converted to long sort type + IndexService index = createIndex("test", settings, createTestMapping("integer")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.LONG); + + // Long index sort should keep the long sort type + index = createIndex("test", settings, createTestMapping("long")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.LONG); + + // Float index sort should keep the float sort type + index = createIndex("test", settings, createTestMapping("float")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.FLOAT); + + // Double index sort should keep the double sort type + index = createIndex("test", settings, createTestMapping("double")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.DOUBLE); + + // String index sort should keep the string sort type + index = createIndex("test", settings, createTestMapping("string")); + assertTrue(index.getIndexSortSupplier().get().getSort()[0].getType() == SortField.Type.STRING); + } catch (IllegalArgumentException ex) { + assertEquals("failed to parse value [0ms] for setting [index.translog.sync_interval], must be >= [100ms]", ex.getMessage()); + } + } + + private static String createTestMapping(String type) { + return " \"properties\": {\n" + + " \"test\": {\n" + + " \"type\": \"text\"\n" + + " },\n" + + " \"sortfield\": {\n" + + " \"type\": \"" + type + "\"\n" + + " }\n" + + " }"; + } } diff --git a/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java b/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java index 
387997892ee30..32c4c048d77ba 100644 --- a/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java +++ b/server/src/test/java/org/opensearch/index/MergePolicySettingsTests.java @@ -31,6 +31,7 @@ package org.opensearch.index; +import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.NoMergePolicy; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; @@ -49,17 +50,17 @@ public class MergePolicySettingsTests extends OpenSearchTestCase { protected final ShardId shardId = new ShardId("index", "_na_", 1); public void testCompoundFileSettings() throws IOException { - assertThat(new MergePolicyConfig(logger, indexSettings(Settings.EMPTY)).getMergePolicy().getNoCFSRatio(), equalTo(0.1)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(true))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0.5))).getMergePolicy().getNoCFSRatio(), equalTo(0.5)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(1.0))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(0.0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(Settings.EMPTY)).getMergePolicy().getNoCFSRatio(), equalTo(0.1)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(true))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0.5))).getMergePolicy().getNoCFSRatio(), equalTo(0.5)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(1.0))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new TieredMergePolicyProvider(logger, indexSettings(build(0.0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); } private static IndexSettings indexSettings(Settings settings) { @@ -67,33 +68,197 @@ private static IndexSettings indexSettings(Settings settings) { } 
public void testNoMerges() { - MergePolicyConfig mp = new MergePolicyConfig( + TieredMergePolicyProvider tmp = new TieredMergePolicyProvider( logger, - indexSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build()) + indexSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false).build()) ); - assertTrue(mp.getMergePolicy() instanceof NoMergePolicy); + LogByteSizeMergePolicyProvider lbsmp = new LogByteSizeMergePolicyProvider( + logger, + indexSettings(Settings.builder().put(MergePolicyProvider.INDEX_MERGE_ENABLED, false).build()) + ); + assertTrue(tmp.getMergePolicy() instanceof NoMergePolicy); + assertTrue(lbsmp.getMergePolicy() instanceof NoMergePolicy); } public void testUpdateSettings() throws IOException { - IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); - assertThat(indexSettings.getMergePolicy().getNoCFSRatio(), equalTo(0.1)); + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + IndexSettings indexSettings = indexSettings(settings); + assertThat(indexSettings.getMergePolicy(false).getNoCFSRatio(), equalTo(0.1)); indexSettings = indexSettings(build(0.9)); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.9)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.9)); indexSettings.updateIndexMetadata(newIndexMeta("index", build(0.1))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.1)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.1)); indexSettings.updateIndexMetadata(newIndexMeta("index", build(0.0))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.0)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.0)); indexSettings.updateIndexMetadata(newIndexMeta("index", build("true"))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(1.0)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(1.0)); indexSettings.updateIndexMetadata(newIndexMeta("index", build("false"))); - assertThat((indexSettings.getMergePolicy()).getNoCFSRatio(), equalTo(0.0)); + assertThat((indexSettings.getMergePolicy(false)).getNoCFSRatio(), equalTo(0.0)); + } + + public void testDefaultMergePolicy() throws IOException { + IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + } + + public void testMergePolicyPrecedence() throws IOException { + // 1. 
INDEX_MERGE_POLICY is not set + // assert defaults + IndexSettings indexSettings = indexSettings(EMPTY_SETTINGS); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 1.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as log_byte_size + // assert index policy is tiered whereas time series index policy is log_byte_size + Settings nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build(); + indexSettings = new IndexSettings(newIndexMeta("test", Settings.EMPTY), nodeSettings); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + // 1.2 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as tiered + // assert both index and time series index policy is tiered + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + indexSettings = new IndexSettings(newIndexMeta("test", Settings.EMPTY), nodeSettings); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 2. INDEX_MERGE_POLICY set as tiered + // assert both index and time-series-index merge policy is set as tiered + indexSettings = indexSettings( + Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()).build() + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 2.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as log_byte_size + // assert both index and time-series-index merge policy is set as tiered + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build(); + indexSettings = new IndexSettings( + newIndexMeta( + "test", + Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()).build() + ), + nodeSettings + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof OpenSearchTieredMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof OpenSearchTieredMergePolicy); + + // 3. 
INDEX_MERGE_POLICY set as log_byte_size + // assert both index and time-series-index merge policy is set as log_byte_size + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof LogByteSizeMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + // 3.1 node setting TIME_SERIES_INDEX_MERGE_POLICY is set as tiered + // assert both index and time-series-index merge policy is set as log_byte_size + nodeSettings = Settings.builder() + .put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.TIERED.getValue()) + .build(); + indexSettings = new IndexSettings( + newIndexMeta( + "test", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ), + nodeSettings + ); + assertTrue(indexSettings.getMergePolicy(false) instanceof LogByteSizeMergePolicy); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + + } + + public void testInvalidMergePolicy() throws IOException { + + final Settings invalidSettings = Settings.builder().put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "invalid").build(); + IllegalArgumentException exc1 = expectThrows( + IllegalArgumentException.class, + () -> IndexSettings.INDEX_MERGE_POLICY.get(invalidSettings) + ); + assertThat(exc1.getMessage(), containsString(" has unsupported policy specified: ")); + IllegalArgumentException exc2 = expectThrows( + IllegalArgumentException.class, + () -> indexSettings(invalidSettings).getMergePolicy(false) + ); + assertThat(exc2.getMessage(), containsString(" has unsupported policy specified: ")); + + final Settings invalidSettings2 = Settings.builder().put(IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.getKey(), "invalid").build(); + IllegalArgumentException exc3 = expectThrows( + IllegalArgumentException.class, + () -> IndexSettings.TIME_SERIES_INDEX_MERGE_POLICY.get(invalidSettings2) + ); + assertThat(exc3.getMessage(), containsString(" has unsupported policy specified: ")); + + IllegalArgumentException exc4 = expectThrows( + IllegalArgumentException.class, + () -> new IndexSettings(newIndexMeta("test", Settings.EMPTY), invalidSettings2).getMergePolicy(true) + ); + assertThat(exc4.getMessage(), containsString(" has unsupported policy specified: ")); + } + + public void testUpdateSettingsForLogByteSizeMergePolicy() throws IOException { + IndexSettings indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertTrue(indexSettings.getMergePolicy(true) instanceof LogByteSizeMergePolicy); + assertThat(indexSettings.getMergePolicy(true).getNoCFSRatio(), equalTo(0.1)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.9) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.9)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.1) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), 
equalTo(0.1)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.0) + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.0)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), "true") + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(1.0)); + indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), "false") + .build() + ); + assertThat((indexSettings.getMergePolicy(true)).getNoCFSRatio(), equalTo(0.0)); } public void testTieredMergePolicySettingsUpdate() throws IOException { IndexSettings indexSettings = indexSettings(Settings.EMPTY); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d ); @@ -102,21 +267,21 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d + TieredMergePolicyProvider.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMbFrac(), 0 ); indexSettings.updateIndexMetadata( @@ -124,41 +289,41 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB) + TieredMergePolicyProvider.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB) ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - 
MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE ); indexSettings.updateIndexMetadata( newIndexMeta( "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1 + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE - 1 ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1 + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE - 1 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), 0.0001 ); indexSettings.updateIndexMetadata( @@ -166,21 +331,21 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1) + TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1) ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER, 0 ); indexSettings.updateIndexMetadata( @@ -188,37 +353,37 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { "index", Settings.builder() .put( - MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1 + TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER + 1 ) .build() ) ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER + 1, 0 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_DELETES_PCT_ALLOWED, 0 ); indexSettings.updateIndexMetadata( newIndexMeta( "index", - 
Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build() + Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build() ) ); - assertEquals(((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), 22, 0); + assertEquals(((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), 22, 0); IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, () -> indexSettings.updateIndexMetadata( newIndexMeta( "index", - Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build() + Settings.builder().put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build() ) ) ); @@ -226,50 +391,162 @@ public void testTieredMergePolicySettingsUpdate() throws IOException { assertThat(cause.getMessage(), containsString("must be <= 50.0")); indexSettings.updateIndexMetadata(newIndexMeta("index", EMPTY_SETTINGS)); // see if defaults are restored assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getForceMergeDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), - MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergeAtOnce(), + TieredMergePolicyProvider.DEFAULT_MAX_MERGE_AT_ONCE ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), - new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getMaxMergedSegmentMB(), + new ByteSizeValue(TieredMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), - MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getSegmentsPerTier(), + TieredMergePolicyProvider.DEFAULT_SEGMENTS_PER_TIER, 0 ); assertEquals( - ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), - MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, + ((OpenSearchTieredMergePolicy) indexSettings.getMergePolicy(false)).getDeletesPctAllowed(), + TieredMergePolicyProvider.DEFAULT_DELETES_PCT_ALLOWED, 0 ); } + public void testLogByteSizeMergePolicySettingsUpdate() throws IOException { + + IndexSettings indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), IndexSettings.IndexMergePolicy.LOG_BYTE_SIZE.getValue()) + .build() + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMergeFactor(), + 
LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey(), + LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + 1 + ) + .build() + ) + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMergeFactor(), + LogByteSizeMergePolicyProvider.DEFAULT_MERGE_FACTOR + 1 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MERGE_POLICY_MIN_MERGE_SETTING.getKey(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE.getMb() + 1, ByteSizeUnit.MB) + ) + .build() + ) + ); + + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMinMergeMB(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MIN_MERGE.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_SETTING.getKey(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMb() + 100, ByteSizeUnit.MB) + ) + .build() + ) + ); + + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeMB(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGED_SEGMENT.getMb() + 100, ByteSizeUnit.MB).getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put( + LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGE_SEGMENT_FOR_FORCED_MERGE_SETTING.getKey(), + new ByteSizeValue( + LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE.getMb() - 100, + ByteSizeUnit.MB + ) + ) + .build() + ) + ); + assertEquals( + ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeMBForForcedMerge(), + new ByteSizeValue(LogByteSizeMergePolicyProvider.DEFAULT_MAX_MERGE_SEGMENT_FORCE_MERGE.getMb() - 100, ByteSizeUnit.MB) + .getMbFrac(), + 0.001 + ); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_MAX_MERGED_DOCS_SETTING.getKey(), 10000000) + .build() + ) + ); + assertEquals(((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMaxMergeDocs(), 10000000); + + indexSettings.updateIndexMetadata( + newIndexMeta( + "index", + Settings.builder() + .put(IndexSettings.INDEX_MERGE_POLICY.getKey(), "log_byte_size") + .put(LogByteSizeMergePolicyProvider.INDEX_LBS_NO_CFS_RATIO_SETTING.getKey(), 0.1) + .build() + ) + ); + assertEquals(indexSettings.getMergePolicy(true).getNoCFSRatio(), 0.1, 0.0); + } + public Settings build(String value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(double value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return
Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(int value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } public Settings build(boolean value) { - return Settings.builder().put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); + return Settings.builder().put(TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), value).build(); } } diff --git a/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java b/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java index 2443ee1ab40be..baaf584702f78 100644 --- a/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/MergeSchedulerSettingsTests.java @@ -92,8 +92,8 @@ public void testUpdateAutoThrottleSettings() throws Exception { .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "2") .put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), "true"); @@ -123,8 +123,8 @@ public void testUpdateMergeMaxThreadCount() throws Exception { .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") - .put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), "2") + .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), "2") .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), "10000") .put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), "10000"); IndexSettings settings = new IndexSettings(newIndexMeta("index", builder.build()), Settings.EMPTY); diff --git a/server/src/test/java/org/opensearch/index/OpenSearchTieredMergePolicyTests.java b/server/src/test/java/org/opensearch/index/OpenSearchTieredMergePolicyTests.java index cbc439041666f..614dacd457782 100644 --- a/server/src/test/java/org/opensearch/index/OpenSearchTieredMergePolicyTests.java +++ b/server/src/test/java/org/opensearch/index/OpenSearchTieredMergePolicyTests.java @@ -32,9 +32,18 @@ package org.opensearch.index; +import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.SegmentCommitInfo; +import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.TieredMergePolicy; +import org.apache.lucene.util.InfoStream; +import org.apache.lucene.util.Version; import org.opensearch.test.OpenSearchTestCase; +import java.io.IOException; +import java.util.Collections; +import java.util.Set; + public class OpenSearchTieredMergePolicyTests extends OpenSearchTestCase 
{ public void testDefaults() { @@ -80,4 +89,32 @@ public void testSetDeletesPctAllowed() { policy.setDeletesPctAllowed(42); assertEquals(42, policy.regularMergePolicy.getDeletesPctAllowed(), 0); } + + public void testFindDeleteMergesReturnsNullOnEmptySegmentInfos() throws IOException { + MergePolicy.MergeSpecification mergeSpecification = new OpenSearchTieredMergePolicy().findForcedDeletesMerges( + new SegmentInfos(Version.LATEST.major), + new MergePolicy.MergeContext() { + @Override + public int numDeletesToMerge(SegmentCommitInfo info) { + return 0; + } + + @Override + public int numDeletedDocs(SegmentCommitInfo info) { + return 0; + } + + @Override + public InfoStream getInfoStream() { + return InfoStream.NO_OUTPUT; + } + + @Override + public Set getMergingSegments() { + return Collections.emptySet(); + } + } + ); + assertNull(mergeSpecification); + } } diff --git a/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java b/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java index 531c636c9d165..478fdcb24f76a 100644 --- a/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/SegmentReplicationPressureServiceTests.java @@ -33,13 +33,15 @@ import org.mockito.stubbing.Answer; import static java.util.Arrays.asList; -import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_TIME_SETTING; +import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING; +import static org.opensearch.index.SegmentReplicationPressureService.MAX_REPLICATION_TIME_BACKPRESSURE_SETTING; import static org.opensearch.index.SegmentReplicationPressureService.SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -50,7 +52,7 @@ public class SegmentReplicationPressureServiceTests extends OpenSearchIndexLevel private static final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) - .put(MAX_REPLICATION_TIME_SETTING.getKey(), TimeValue.timeValueSeconds(5)) + .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueSeconds(5)) .build(); public void testIsSegrepLimitBreached() throws Exception { @@ -112,7 +114,7 @@ public void testIsSegrepLimitBreached_onlyCheckpointLimitBreached() throws Excep indexInBatches(5, shards, primaryShard); - Set replicationStats = primaryShard.getReplicationStats(); + Set replicationStats = primaryShard.getReplicationStatsForTrackedReplicas(); assertEquals(1, replicationStats.size()); SegmentReplicationShardStats shardStats = replicationStats.stream().findFirst().get(); assertEquals(5, shardStats.getCheckpointsBehindCount()); @@ -140,7 +142,7 @@ public void testIsSegrepLimitBreached_onlyTimeLimitBreached() throws Exception { indexInBatches(1, shards, primaryShard); assertBusy(() -> { - Set replicationStats = primaryShard.getReplicationStats(); + Set replicationStats = primaryShard.getReplicationStatsForTrackedReplicas(); assertEquals(1, 
replicationStats.size()); SegmentReplicationShardStats shardStats = replicationStats.stream().findFirst().get(); assertTrue(shardStats.getCurrentReplicationTimeMillis() > TimeValue.timeValueSeconds(5).millis()); @@ -162,7 +164,7 @@ public void testIsSegrepLimitBreached_underStaleNodeLimit() throws Exception { SegmentReplicationPressureService service = buildPressureService(settings, primaryShard); assertBusy(() -> { - Set replicationStats = primaryShard.getReplicationStats(); + Set replicationStats = primaryShard.getReplicationStatsForTrackedReplicas(); assertEquals(3, replicationStats.size()); SegmentReplicationShardStats shardStats = replicationStats.stream().findFirst().get(); assertTrue(shardStats.getCurrentReplicationTimeMillis() > TimeValue.timeValueSeconds(5).millis()); @@ -196,7 +198,8 @@ public void testFailStaleReplicaTask() throws Exception { final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) - .put(MAX_REPLICATION_TIME_SETTING.getKey(), TimeValue.timeValueMillis(10)) + .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueMillis(10)) + .put(MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING.getKey(), TimeValue.timeValueMillis(20)) .build(); try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { @@ -208,7 +211,7 @@ public void testFailStaleReplicaTask() throws Exception { indexInBatches(5, shards, primaryShard); // assert that replica shard is few checkpoints behind primary - Set replicationStats = primaryShard.getReplicationStats(); + Set replicationStats = primaryShard.getReplicationStatsForTrackedReplicas(); assertEquals(1, replicationStats.size()); SegmentReplicationShardStats shardStats = replicationStats.stream().findFirst().get(); assertEquals(5, shardStats.getCheckpointsBehindCount()); @@ -222,6 +225,38 @@ public void testFailStaleReplicaTask() throws Exception { } } + public void testFailStaleReplicaTaskDisabled() throws Exception { + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(SEGMENT_REPLICATION_INDEXING_PRESSURE_ENABLED.getKey(), true) + .put(MAX_REPLICATION_TIME_BACKPRESSURE_SETTING.getKey(), TimeValue.timeValueMillis(10)) + .put(MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING.getKey(), TimeValue.timeValueMillis(0)) + .build(); + + try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primaryShard = shards.getPrimary(); + SegmentReplicationPressureService service = buildPressureService(settings, primaryShard); + Mockito.reset(shardStateAction); + + // index docs in batches without refreshing + indexInBatches(5, shards, primaryShard); + + // assert that replica shard is few checkpoints behind primary + Set replicationStats = primaryShard.getReplicationStatsForTrackedReplicas(); + assertEquals(1, replicationStats.size()); + SegmentReplicationShardStats shardStats = replicationStats.stream().findFirst().get(); + assertEquals(5, shardStats.getCheckpointsBehindCount()); + + // call the background task + service.getFailStaleReplicaTask().runInternal(); + + // verify that remote shard failed method is never called as it is disabled. 
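+ // Note: per the settings above, the 0ms MAX_REPLICATION_LIMIT_STALE_REPLICA_SETTING is what turns the fail-stale-replica path off for this test.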
+ verify(shardStateAction, never()).remoteShardFailed(any(), anyString(), anyLong(), anyBoolean(), anyString(), any(), any()); + replicateSegments(primaryShard, shards.getReplicas()); + } + } + private int indexInBatches(int count, ReplicationGroup shards, IndexShard primaryShard) throws Exception { int totalDocs = 0; for (int i = 0; i < count; i++) { @@ -243,6 +278,13 @@ private SegmentReplicationPressureService buildPressureService(Settings settings ClusterService clusterService = mock(ClusterService.class); when(clusterService.getClusterSettings()).thenReturn(new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - return new SegmentReplicationPressureService(settings, clusterService, indicesService, shardStateAction, mock(ThreadPool.class)); + return new SegmentReplicationPressureService( + settings, + clusterService, + indicesService, + shardStateAction, + new SegmentReplicationStatsTracker(indicesService), + mock(ThreadPool.class) + ); } } diff --git a/server/src/test/java/org/opensearch/index/SegmentReplicationStatsTrackerTests.java b/server/src/test/java/org/opensearch/index/SegmentReplicationStatsTrackerTests.java new file mode 100644 index 0000000000000..04423d583e8f9 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/SegmentReplicationStatsTrackerTests.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.IndicesService; +import org.opensearch.test.OpenSearchTestCase; + +import org.mockito.Mockito; + +import static org.mockito.Mockito.mock; + +public class SegmentReplicationStatsTrackerTests extends OpenSearchTestCase { + + private IndicesService indicesService = mock(IndicesService.class); + + public void testRejectedCount() { + SegmentReplicationStatsTracker segmentReplicationStatsTracker = new SegmentReplicationStatsTracker(indicesService); + + // Verify that total rejection count is 0 on an empty rejectionCount map in statsTracker. + assertTrue(segmentReplicationStatsTracker.getRejectionCount().isEmpty()); + assertEquals(segmentReplicationStatsTracker.getTotalRejectionStats().getTotalRejectionCount(), 0L); + + // Verify that total rejection count is 1 after incrementing rejectionCount. 
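+ // A single increment against one (mocked) ShardId should be reflected in the aggregated total asserted below.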
+ segmentReplicationStatsTracker.incrementRejectionCount(Mockito.mock(ShardId.class)); + assertEquals(segmentReplicationStatsTracker.getTotalRejectionStats().getTotalRejectionCount(), 1L); + } + +} diff --git a/server/src/test/java/org/opensearch/index/codec/CodecTests.java b/server/src/test/java/org/opensearch/index/codec/CodecTests.java index 939245c36fb11..5fa8aff8c3a6c 100644 --- a/server/src/test/java/org/opensearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/opensearch/index/codec/CodecTests.java @@ -48,8 +48,6 @@ import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.IndexAnalyzers; -import org.opensearch.index.codec.customcodecs.Lucene95CustomCodec; -import org.opensearch.index.codec.customcodecs.Lucene95CustomStoredFieldsFormat; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.similarity.SimilarityService; @@ -95,49 +93,7 @@ public void testZlib() throws Exception { assert codec instanceof PerFieldMappingPostingFormatCodec; } - public void testZstd() throws Exception { - Codec codec = createCodecService(false).codec("zstd"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); - } - - public void testZstdNoDict() throws Exception { - Codec codec = createCodecService(false).codec("zstd_no_dict"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); - } - - public void testZstdWithCompressionLevel() throws Exception { - int randomCompressionLevel = randomIntBetween(1, 6); - Codec codec = createCodecService(randomCompressionLevel, "zstd").codec("zstd"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(randomCompressionLevel, storedFieldsFormat.getCompressionLevel()); - } - - public void testZstdNoDictWithCompressionLevel() throws Exception { - int randomCompressionLevel = randomIntBetween(1, 6); - Codec codec = createCodecService(randomCompressionLevel, "zstd_no_dict").codec("zstd_no_dict"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(randomCompressionLevel, storedFieldsFormat.getCompressionLevel()); - } - public void testBestCompressionWithCompressionLevel() { - final Settings zstdSettings = Settings.builder() - .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) - .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), randomFrom(CodecService.ZSTD_CODEC, CodecService.ZSTD_NO_DICT_CODEC)) - .build(); - - // able to validate zstd - final IndexScopedSettings zstdIndexScopedSettings = new IndexScopedSettings( - zstdSettings, - IndexScopedSettings.BUILT_IN_INDEX_SETTINGS - ); - zstdIndexScopedSettings.validate(zstdSettings, true); - final Settings settings = Settings.builder() 
.put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) @@ -152,17 +108,6 @@ public void testLuceneCodecsWithCompressionLevel() { String codecName = randomFrom(Codec.availableCodecs()); Codec codec = Codec.forName(codecName); - final Settings customCodecSettings = Settings.builder() - .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) - .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), "Lucene95CustomCodec") - .build(); - - final IndexScopedSettings customCodecIndexScopedSettings = new IndexScopedSettings( - customCodecSettings, - IndexScopedSettings.BUILT_IN_INDEX_SETTINGS - ); - customCodecIndexScopedSettings.validate(customCodecSettings, true); - final Settings settings = Settings.builder() .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) @@ -178,14 +123,6 @@ public void testLuceneCodecsWithCompressionLevel() { } } - public void testZstandardCompressionLevelSupport() throws Exception { - CodecService codecService = createCodecService(false); - CodecSettings zstdCodec = (CodecSettings) codecService.codec("zstd"); - CodecSettings zstdNoDictCodec = (CodecSettings) codecService.codec("zstd_no_dict"); - assertTrue(zstdCodec.supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING)); - assertTrue(zstdNoDictCodec.supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING)); - } - public void testDefaultMapperServiceNull() throws Exception { Codec codec = createCodecService(true).codec("default"); assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_SPEED, codec); @@ -196,20 +133,6 @@ public void testBestCompressionMapperServiceNull() throws Exception { assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_COMPRESSION, codec); } - public void testZstdMapperServiceNull() throws Exception { - Codec codec = createCodecService(true).codec("zstd"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); - } - - public void testZstdNoDictMapperServiceNull() throws Exception { - Codec codec = createCodecService(true).codec("zstd_no_dict"); - assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); - Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, storedFieldsFormat.getCompressionLevel()); - } - public void testExceptionCodecNull() { assertThrows(IllegalArgumentException.class, () -> createCodecService(true).codec(null)); } @@ -224,12 +147,6 @@ public void testCodecServiceWithNullMapperService() { CodecService codecService = new CodecService(null, indexSettings, LogManager.getLogger("test")); assert codecService.codec("default") instanceof Lucene95Codec; assert codecService.codec("best_compression") instanceof Lucene95Codec; - Lucene95CustomStoredFieldsFormat zstdStoredFieldsFormat = (Lucene95CustomStoredFieldsFormat) codecService.codec("zstd") - .storedFieldsFormat(); - Lucene95CustomStoredFieldsFormat zstdNoDictStoredFieldsFormat = (Lucene95CustomStoredFieldsFormat) codecService.codec("zstd") - .storedFieldsFormat(); - 
assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, zstdStoredFieldsFormat.getCompressionLevel()); - assertEquals(Lucene95CustomCodec.DEFAULT_COMPRESSION_LEVEL, zstdNoDictStoredFieldsFormat.getCompressionLevel()); } public void testCodecServiceWithOnlyMapperService() throws IOException { @@ -246,12 +163,6 @@ public void testCodecServiceWithOnlyMapperService() throws IOException { ); assert codecService.codec("default") instanceof PerFieldMappingPostingFormatCodec; assert codecService.codec("best_compression") instanceof PerFieldMappingPostingFormatCodec; - Lucene95CustomStoredFieldsFormat zstdStoredFieldsFormat = (Lucene95CustomStoredFieldsFormat) codecService.codec("zstd") - .storedFieldsFormat(); - Lucene95CustomStoredFieldsFormat zstdNoDictStoredFieldsFormat = (Lucene95CustomStoredFieldsFormat) codecService.codec("zstd") - .storedFieldsFormat(); - assertEquals(randomCompressionLevel, zstdStoredFieldsFormat.getCompressionLevel()); - assertEquals(randomCompressionLevel, zstdNoDictStoredFieldsFormat.getCompressionLevel()); } // write some docs with it, inspect .si to see this was the used compression @@ -262,13 +173,6 @@ private void assertStoredFieldsCompressionEquals(Lucene95Codec.Mode expected, Co assertEquals(expected, Lucene95Codec.Mode.valueOf(v)); } - private void assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode expected, Codec actual) throws Exception { - SegmentReader sr = getSegmentReader(actual); - String v = sr.getSegmentInfo().info.getAttribute(Lucene95CustomStoredFieldsFormat.MODE_KEY); - assertNotNull(v); - assertEquals(expected, Lucene95CustomCodec.Mode.valueOf(v)); - } - private CodecService createCodecService(boolean isMapperServiceNull) throws IOException { Settings nodeSettings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); if (isMapperServiceNull) { @@ -277,15 +181,6 @@ private CodecService createCodecService(boolean isMapperServiceNull) throws IOEx return buildCodecService(nodeSettings); } - private CodecService createCodecService(int randomCompressionLevel, String codec) throws IOException { - Settings nodeSettings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .put("index.codec", codec) - .put("index.codec.compression_level", randomCompressionLevel) - .build(); - return buildCodecService(nodeSettings); - } - private CodecService buildCodecService(Settings nodeSettings) throws IOException { IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("_na", nodeSettings); diff --git a/server/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java b/server/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java deleted file mode 100644 index cc794eb2c48f1..0000000000000 --- a/server/src/test/java/org/opensearch/index/codec/customcodecs/AbstractCompressorTests.java +++ /dev/null @@ -1,219 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec.customcodecs; - -import org.apache.lucene.codecs.compressing.Compressor; -import org.apache.lucene.codecs.compressing.Decompressor; -import org.apache.lucene.store.ByteArrayDataInput; -import org.apache.lucene.store.ByteBuffersDataInput; -import org.apache.lucene.store.ByteBuffersDataOutput; -import org.apache.lucene.tests.util.LineFileDocs; -import org.apache.lucene.tests.util.TestUtil; -import org.apache.lucene.util.BytesRef; -import org.opensearch.test.OpenSearchTestCase; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.util.List; -import java.util.Random; - -/** - * Test cases for compressors (based on {@See org.opensearch.common.compress.DeflateCompressTests}). - */ -public abstract class AbstractCompressorTests extends OpenSearchTestCase { - - abstract Compressor compressor(); - - abstract Decompressor decompressor(); - - public void testEmpty() throws IOException { - final byte[] bytes = "".getBytes(StandardCharsets.UTF_8); - doTest(bytes); - } - - public void testShortLiterals() throws IOException { - final byte[] bytes = "1234567345673456745608910123".getBytes(StandardCharsets.UTF_8); - doTest(bytes); - } - - public void testRandom() throws IOException { - Random r = random(); - for (int i = 0; i < 10; i++) { - final byte[] bytes = new byte[TestUtil.nextInt(r, 1, 100000)]; - r.nextBytes(bytes); - doTest(bytes); - } - } - - public void testLineDocs() throws IOException { - Random r = random(); - LineFileDocs lineFileDocs = new LineFileDocs(r); - for (int i = 0; i < 10; i++) { - int numDocs = TestUtil.nextInt(r, 1, 200); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - for (int j = 0; j < numDocs; j++) { - String s = lineFileDocs.nextDoc().get("body"); - bos.write(s.getBytes(StandardCharsets.UTF_8)); - } - doTest(bos.toByteArray()); - } - lineFileDocs.close(); - } - - public void testRepetitionsL() throws IOException { - Random r = random(); - for (int i = 0; i < 10; i++) { - int numLongs = TestUtil.nextInt(r, 1, 10000); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - long theValue = r.nextLong(); - for (int j = 0; j < numLongs; j++) { - if (r.nextInt(10) == 0) { - theValue = r.nextLong(); - } - bos.write((byte) (theValue >>> 56)); - bos.write((byte) (theValue >>> 48)); - bos.write((byte) (theValue >>> 40)); - bos.write((byte) (theValue >>> 32)); - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - doTest(bos.toByteArray()); - } - } - - public void testRepetitionsI() throws IOException { - Random r = random(); - for (int i = 0; i < 10; i++) { - int numInts = TestUtil.nextInt(r, 1, 20000); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - int theValue = r.nextInt(); - for (int j = 0; j < numInts; j++) { - if (r.nextInt(10) == 0) { - theValue = r.nextInt(); - } - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - doTest(bos.toByteArray()); - } - } - - public void testRepetitionsS() throws IOException { - Random r = random(); - for (int i = 0; i < 10; i++) { - int numShorts = TestUtil.nextInt(r, 1, 40000); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - short theValue = (short) r.nextInt(65535); - for (int j = 0; j < numShorts; j++) { - if (r.nextInt(10) == 0) { - theValue = (short) 
r.nextInt(65535); - } - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - doTest(bos.toByteArray()); - } - } - - public void testMixed() throws IOException { - Random r = random(); - LineFileDocs lineFileDocs = new LineFileDocs(r); - for (int i = 0; i < 2; ++i) { - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - int prevInt = r.nextInt(); - long prevLong = r.nextLong(); - while (bos.size() < 400000) { - switch (r.nextInt(4)) { - case 0: - addInt(r, prevInt, bos); - break; - case 1: - addLong(r, prevLong, bos); - break; - case 2: - addString(lineFileDocs, bos); - break; - case 3: - addBytes(r, bos); - break; - default: - throw new IllegalStateException("Random is broken"); - } - } - doTest(bos.toByteArray()); - } - } - - private void addLong(Random r, long prev, ByteArrayOutputStream bos) { - long theValue = prev; - if (r.nextInt(10) != 0) { - theValue = r.nextLong(); - } - bos.write((byte) (theValue >>> 56)); - bos.write((byte) (theValue >>> 48)); - bos.write((byte) (theValue >>> 40)); - bos.write((byte) (theValue >>> 32)); - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - - private void addInt(Random r, int prev, ByteArrayOutputStream bos) { - int theValue = prev; - if (r.nextInt(10) != 0) { - theValue = r.nextInt(); - } - bos.write((byte) (theValue >>> 24)); - bos.write((byte) (theValue >>> 16)); - bos.write((byte) (theValue >>> 8)); - bos.write((byte) theValue); - } - - private void addString(LineFileDocs lineFileDocs, ByteArrayOutputStream bos) throws IOException { - String s = lineFileDocs.nextDoc().get("body"); - bos.write(s.getBytes(StandardCharsets.UTF_8)); - } - - private void addBytes(Random r, ByteArrayOutputStream bos) throws IOException { - byte bytes[] = new byte[TestUtil.nextInt(r, 1, 10000)]; - r.nextBytes(bytes); - bos.write(bytes); - } - - private void doTest(byte[] bytes) throws IOException { - final int length = bytes.length; - - ByteBuffersDataInput in = new ByteBuffersDataInput(List.of(ByteBuffer.wrap(bytes))); - ByteBuffersDataOutput out = new ByteBuffersDataOutput(); - - // let's compress - Compressor compressor = compressor(); - compressor.compress(in, out); - byte[] compressed = out.toArrayCopy(); - - // let's decompress - BytesRef outbytes = new BytesRef(); - Decompressor decompressor = decompressor(); - decompressor.decompress(new ByteArrayDataInput(compressed), length, 0, length, outbytes); - - // get the uncompressed array out of outbytes - byte[] restored = new byte[outbytes.length]; - System.arraycopy(outbytes.bytes, 0, restored, 0, outbytes.length); - - assertArrayEquals(bytes, restored); - } - -} diff --git a/server/src/test/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormatTests.java b/server/src/test/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormatTests.java deleted file mode 100644 index e87fb56770e4c..0000000000000 --- a/server/src/test/java/org/opensearch/index/codec/customcodecs/Lucene95CustomStoredFieldsFormatTests.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.codec.customcodecs; - -import org.opensearch.test.OpenSearchTestCase; - -public class Lucene95CustomStoredFieldsFormatTests extends OpenSearchTestCase { - - public void testDefaultLucene95CustomCodecMode() { - Lucene95CustomStoredFieldsFormat lucene95CustomStoredFieldsFormat = new Lucene95CustomStoredFieldsFormat(); - assertEquals(Lucene95CustomCodec.Mode.ZSTD, lucene95CustomStoredFieldsFormat.getMode()); - } - - public void testZstdNoDictLucene95CustomCodecMode() { - Lucene95CustomStoredFieldsFormat lucene95CustomStoredFieldsFormat = new Lucene95CustomStoredFieldsFormat( - Lucene95CustomCodec.Mode.ZSTD_NO_DICT - ); - assertEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, lucene95CustomStoredFieldsFormat.getMode()); - } - - public void testZstdModeWithCompressionLevel() { - int randomCompressionLevel = randomIntBetween(1, 6); - Lucene95CustomStoredFieldsFormat lucene95CustomStoredFieldsFormat = new Lucene95CustomStoredFieldsFormat( - Lucene95CustomCodec.Mode.ZSTD, - randomCompressionLevel - ); - assertEquals(Lucene95CustomCodec.Mode.ZSTD, lucene95CustomStoredFieldsFormat.getMode()); - assertEquals(randomCompressionLevel, lucene95CustomStoredFieldsFormat.getCompressionLevel()); - } - - public void testZstdNoDictLucene95CustomCodecModeWithCompressionLevel() { - int randomCompressionLevel = randomIntBetween(1, 6); - Lucene95CustomStoredFieldsFormat lucene95CustomStoredFieldsFormat = new Lucene95CustomStoredFieldsFormat( - Lucene95CustomCodec.Mode.ZSTD_NO_DICT, - randomCompressionLevel - ); - assertEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, lucene95CustomStoredFieldsFormat.getMode()); - assertEquals(randomCompressionLevel, lucene95CustomStoredFieldsFormat.getCompressionLevel()); - } - -} diff --git a/server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java b/server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java deleted file mode 100644 index 78cf62c08f889..0000000000000 --- a/server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdCompressorTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ -package org.opensearch.index.codec.customcodecs; - -import org.apache.lucene.codecs.compressing.Compressor; -import org.apache.lucene.codecs.compressing.Decompressor; - -/** - * Test ZSTD compression (with dictionary enabled) - */ -public class ZstdCompressorTests extends AbstractCompressorTests { - - private final Compressor compressor = new ZstdCompressionMode().newCompressor(); - private final Decompressor decompressor = new ZstdCompressionMode().newDecompressor(); - - @Override - Compressor compressor() { - return compressor; - } - - @Override - Decompressor decompressor() { - return decompressor; - } -} diff --git a/server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java b/server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java deleted file mode 100644 index 2eda81a6af2ab..0000000000000 --- a/server/src/test/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCompressorTests.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ -package org.opensearch.index.codec.customcodecs; - -import org.apache.lucene.codecs.compressing.Compressor; -import org.apache.lucene.codecs.compressing.Decompressor; - -/** - * Test ZSTD compression (with no dictionary). - */ -public class ZstdNoDictCompressorTests extends AbstractCompressorTests { - - private final Compressor compressor = new ZstdNoDictCompressionMode().newCompressor(); - private final Decompressor decompressor = new ZstdNoDictCompressionMode().newDecompressor(); - - @Override - Compressor compressor() { - return compressor; - } - - @Override - Decompressor decompressor() { - return decompressor; - } -} diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index 33d0a7c4242c0..293d3a18d120a 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -40,6 +40,7 @@ import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.appender.AbstractAppender; import org.apache.logging.log4j.core.filter.RegexFilter; +import org.apache.lucene.codecs.LiveDocsFormat; import org.apache.lucene.document.Field; import org.apache.lucene.document.KeywordField; import org.apache.lucene.document.LongPoint; @@ -78,6 +79,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.Lock; +import org.apache.lucene.tests.mockfile.ExtrasFS; import org.apache.lucene.tests.store.MockDirectoryWrapper; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; @@ -152,10 +154,12 @@ import org.opensearch.index.translog.TranslogException; import org.opensearch.index.translog.listener.TranslogEventListener; import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.MockLogAppender; import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.ThreadPool; import org.hamcrest.MatcherAssert; import org.hamcrest.Matchers; +import org.junit.Assert; import java.io.Closeable; import java.io.IOException; @@ -165,6 +169,7 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -228,7 +233,6 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -3229,6 +3233,335 @@ public void testFailStart() throws IOException { } } + public void testUnreferencedFileCleanUpOnSegmentMergeFailureWithCleanUpEnabled() throws Exception { + MockDirectoryWrapper wrapper = newMockDirectory(); + final CountDownLatch cleanupCompleted = new CountDownLatch(1); + MockDirectoryWrapper.Failure fail = new MockDirectoryWrapper.Failure() { + public boolean didFail1; + public boolean didFail2; + + @Override + public void eval(MockDirectoryWrapper dir) throws IOException { + if (!doFail) { + return; + } + + // Fail segment merge with diskfull during merging terms. 
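+ // Two IOExceptions are injected, at most once each: the first while merging terms, the second during the live-docs write that follows.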
+ if (callStackContainsAnyOf("mergeTerms") && !didFail1) { + didFail1 = true; + throw new IOException("No space left on device"); + } + if (callStackContains(LiveDocsFormat.class, "writeLiveDocs") && !didFail2) { + didFail2 = true; + throw new IOException("No space left on device"); + } + } + }; + + wrapper.failOn(fail); + try { + Store store = createStore(wrapper); + final Engine.EventListener eventListener = new Engine.EventListener() { + @Override + public void onFailedEngine(String reason, Exception e) { + try { + // extra0 file is added as a part of + // https://lucene.apache.org/core/7_2_1/test-framework/org/apache/lucene/mockfile/ExtrasFS.html + // Safe to remove from file count along with write.lock without impacting the test. + long fileCount = Arrays.stream(store.directory().listAll()) + .filter(file -> file.equals("write.lock") == false && ExtrasFS.isExtra(file) == false) + .count(); + + // Since only one document is committed and unreferenced files are cleaned up, + // there are 4 files (*cfs, *cfe, *si and segments_*). + assertThat(fileCount, equalTo(4L)); + wrapper.close(); + store.close(); + engine.close(); + cleanupCompleted.countDown(); + } catch (IOException ex) { + throw new AssertionError(ex); + } + } + }; + + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final AtomicLong retentionLeasesVersion = new AtomicLong(); + final AtomicReference retentionLeasesHolder = new AtomicReference<>( + new RetentionLeases(primaryTerm, retentionLeasesVersion.get(), Collections.emptyList()) + ); + InternalEngine engine = createEngine( + config( + defaultSettings, + store, + createTempDir(), + newMergePolicy(), + null, + null, + null, + globalCheckpoint::get, + retentionLeasesHolder::get, + new NoneCircuitBreakerService(), + eventListener + ) + ); + + List segments = engine.segments(true); + assertThat(segments.isEmpty(), equalTo(true)); + + ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); + engine.index(indexForDoc(doc)); + engine.refresh("test"); + engine.flush(); + + segments = engine.segments(false); + assertThat(segments.size(), equalTo(1)); + + ParsedDocument doc2 = testParsedDocument("2", null, testDocumentWithTextField(), B_2, null); + engine.index(indexForDoc(doc2)); + engine.refresh("test"); + + segments = engine.segments(false); + assertThat(segments.size(), equalTo(2)); + + fail.setDoFail(); + // IndexWriter can throw either IOException or IllegalStateException depending on whether tragedy is set or not. + expectThrowsAnyOf( + Arrays.asList(IOException.class, IllegalStateException.class), + () -> engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID()) + ); + + assertTrue(cleanupCompleted.await(10, TimeUnit.SECONDS)); + // Cleanup count will be incremented whenever cleanup is performed correctly. 
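+ // (unreferencedFileCleanUpsPerformed() appears to be a new engine-level counter introduced alongside this cleanup logic.)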
+ long unreferencedFileCleanUpsPerformed = engine.unreferencedFileCleanUpsPerformed(); + assertThat(unreferencedFileCleanUpsPerformed, equalTo(1L)); + } catch (Exception ex) { + throw new AssertionError(ex); + } + } + + public void testUnreferencedFileCleanUpOnSegmentMergeFailureWithCleanUpDisabled() throws Exception { + MockDirectoryWrapper wrapper = newMockDirectory(); + final CountDownLatch cleanupCompleted = new CountDownLatch(1); + MockDirectoryWrapper.Failure fail = new MockDirectoryWrapper.Failure() { + public boolean didFail1; + public boolean didFail2; + + @Override + public void eval(MockDirectoryWrapper dir) throws IOException { + if (!doFail) { + return; + } + if (callStackContainsAnyOf("mergeTerms") && !didFail1) { + didFail1 = true; + throw new IOException("No space left on device"); + } + if (callStackContains(LiveDocsFormat.class, "writeLiveDocs") && !didFail2) { + didFail2 = true; + throw new IOException("No space left on device"); + } + } + }; + + wrapper.failOn(fail); + try { + Store store = createStore(wrapper); + final Engine.EventListener eventListener = new Engine.EventListener() { + @Override + public void onFailedEngine(String reason, Exception e) { + try { + // extra0 file is added as a part of + // https://lucene.apache.org/core/7_2_1/test-framework/org/apache/lucene/mockfile/ExtrasFS.html + // Safe to remove from file count along with write.lock without impacting the test. + long fileCount = Arrays.stream(store.directory().listAll()) + .filter(file -> file.equals("write.lock") == false && ExtrasFS.isExtra(file) == false) + .count(); + + // Since cleanup is not happening now, all unreferenced files will be present as well. + assertThat(fileCount, equalTo(13L)); + wrapper.close(); + store.close(); + engine.close(); + cleanupCompleted.countDown(); + } catch (IOException ex) { + throw new AssertionError(ex); + } + } + }; + + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final AtomicLong retentionLeasesVersion = new AtomicLong(); + final AtomicReference retentionLeasesHolder = new AtomicReference<>( + new RetentionLeases(primaryTerm, retentionLeasesVersion.get(), Collections.emptyList()) + ); + InternalEngine engine = createEngine( + config( + defaultSettings, + store, + createTempDir(), + newMergePolicy(), + null, + null, + null, + globalCheckpoint::get, + retentionLeasesHolder::get, + new NoneCircuitBreakerService(), + eventListener + ) + ); + + // Disable cleanup + final IndexSettings indexSettings = engine.config().getIndexSettings(); + final IndexMetadata indexMetadata = IndexMetadata.builder(indexSettings.getIndexMetadata()) + .settings( + Settings.builder().put(indexSettings.getSettings()).put(IndexSettings.INDEX_UNREFERENCED_FILE_CLEANUP.getKey(), false) + ) + .build(); + indexSettings.updateIndexMetadata(indexMetadata); + + List segments = engine.segments(true); + assertThat(segments.isEmpty(), equalTo(true)); + + ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); + engine.index(indexForDoc(doc)); + engine.refresh("test"); + engine.flush(); + + segments = engine.segments(false); + assertThat(segments.size(), equalTo(1)); + + ParsedDocument doc2 = testParsedDocument("2", null, testDocumentWithTextField(), B_2, null); + engine.index(indexForDoc(doc2)); + engine.refresh("test"); + + segments = engine.segments(false); + assertThat(segments.size(), equalTo(2)); + + fail.setDoFail(); + // IndexWriter can throw either
IOException or IllegalStateException depending on whether tragedy is set or not. + expectThrowsAnyOf( + Arrays.asList(IOException.class, IllegalStateException.class), + () -> engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID()) + ); + + assertTrue(cleanupCompleted.await(10, TimeUnit.SECONDS)); + // Cleanup count will not be incremented whenever cleanup is disabled. + long unreferencedFileCleanUpsPerformed = engine.unreferencedFileCleanUpsPerformed(); + assertThat(unreferencedFileCleanUpsPerformed, equalTo(0L)); + } catch (Exception ex) { + throw new AssertionError(ex); + } + } + + public void testUnreferencedFileCleanUpFailsOnSegmentMergeFailureWhenDirectoryClosed() throws Exception { + MockDirectoryWrapper wrapper = newMockDirectory(); + final CountDownLatch cleanupCompleted = new CountDownLatch(1); + MockDirectoryWrapper.Failure fail = new MockDirectoryWrapper.Failure() { + public boolean didFail1; + public boolean didFail2; + + @Override + public void eval(MockDirectoryWrapper dir) throws IOException { + if (!doFail) { + return; + } + if (callStackContainsAnyOf("mergeTerms") && !didFail1) { + didFail1 = true; + throw new IOException("No space left on device"); + } + if (callStackContains(LiveDocsFormat.class, "writeLiveDocs") && !didFail2) { + didFail2 = true; + throw new IOException("No space left on device"); + } + } + }; + + wrapper.failOn(fail); + MockLogAppender mockAppender = MockLogAppender.createForLoggers(Loggers.getLogger(Engine.class, shardId)); + try { + Store store = createStore(wrapper); + final Engine.EventListener eventListener = new Engine.EventListener() { + @Override + public void onFailedEngine(String reason, Exception e) { + try { + store.close(); + engine.close(); + mockAppender.assertAllExpectationsMatched(); + mockAppender.close(); + cleanupCompleted.countDown(); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + } + }; + + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final AtomicLong retentionLeasesVersion = new AtomicLong(); + final AtomicReference retentionLeasesHolder = new AtomicReference<>( + new RetentionLeases(primaryTerm, retentionLeasesVersion.get(), Collections.emptyList()) + ); + InternalEngine engine = createEngine( + config( + defaultSettings, + store, + createTempDir(), + newMergePolicy(), + null, + null, + null, + globalCheckpoint::get, + retentionLeasesHolder::get, + new NoneCircuitBreakerService(), + eventListener + ) + ); + + List segments = engine.segments(true); + assertThat(segments.isEmpty(), equalTo(true)); + + ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); + engine.index(indexForDoc(doc)); + engine.refresh("test"); + engine.flush(); + + segments = engine.segments(false); + assertThat(segments.size(), equalTo(1)); + + ParsedDocument doc2 = testParsedDocument("2", null, testDocumentWithTextField(), B_2, null); + engine.index(indexForDoc(doc2)); + engine.refresh("test"); + + segments = engine.segments(false); + assertThat(segments.size(), equalTo(2)); + + fail.setDoFail(); + // Close the store so that unreferenced file cleanup will fail. 
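+ // With the directory already closed, the delete attempts during cleanup should throw, surfacing as the ERROR log expected below.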
+ store.close(); + + final String message = "Error while deleting unreferenced file *"; + mockAppender.start(); + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation("expected message", Engine.class.getCanonicalName(), Level.ERROR, message) + ); + + // IndexWriter can throw either IOException or IllegalStateException depending on whether tragedy is set or not. + expectThrowsAnyOf( + Arrays.asList(IOException.class, IllegalStateException.class), + () -> engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID()) + ); + + assertTrue(cleanupCompleted.await(10, TimeUnit.SECONDS)); + // Cleanup count will not be incremented whenever there is some issue with cleanup. + long unreferencedFileCleanUpsPerformed = engine.unreferencedFileCleanUpsPerformed(); + assertThat(unreferencedFileCleanUpsPerformed, equalTo(0L)); + } catch (Exception ex) { + throw new AssertionError(ex); + } + } + public void testSettings() { CodecService codecService = new CodecService(null, engine.config().getIndexSettings(), logger); LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig(); @@ -3683,7 +4016,7 @@ public void testRecoverFromForeignTranslog() throws IOException { final Path badTranslogLog = createTempDir(); final String badUUID = Translog.createEmptyTranslog(badTranslogLog, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); Translog translog = new LocalTranslog( - new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED, @@ -3700,7 +4033,8 @@ public void testRecoverFromForeignTranslog() throws IOException { shardId, translog.location(), config.getIndexSettings(), - BigArrays.NON_RECYCLING_INSTANCE + BigArrays.NON_RECYCLING_INSTANCE, + "" ); EngineConfig brokenConfig = new EngineConfig.Builder().shardId(shardId) @@ -6952,7 +7286,11 @@ public void testMaxSeqNoInCommitUserData() throws Exception { engine.ensureOpen(); while (running.get() && assertAndGetInternalTranslogManager(engine.translogManager()).getTranslog().currentFileGeneration() < 500) { - engine.translogManager().rollTranslogGeneration(); // make adding operations to translog slower + try { + engine.translogManager().rollTranslogGeneration(); // make adding operations to translog slower + } catch (IOException e) { + fail("io exception not expected"); + } } }); rollTranslog.start(); @@ -7408,7 +7746,8 @@ public void testNotWarmUpSearcherInEngineCtor() throws Exception { config.getTranslogConfig().getShardId(), createTempDir(), config.getTranslogConfig().getIndexSettings(), - config.getTranslogConfig().getBigArrays() + config.getTranslogConfig().getBigArrays(), + "" ); EngineConfig configWithWarmer = new EngineConfig.Builder().shardId(config.getShardId()) .threadPool(config.getThreadPool()) @@ -7560,16 +7899,86 @@ public void testMaxDocsOnReplica() throws Exception { } } - public void testGetSegmentInfosSnapshot() throws IOException { + public void testGetSegmentInfosSnapshot_AllSnapshotFilesPreservedAcrossCommit() throws Exception { IOUtils.close(store, engine); - Store store = createStore(); - InternalEngine engine = spy(createEngine(store, createTempDir())); - GatedCloseable segmentInfosSnapshot = engine.getSegmentInfosSnapshot(); - assertNotNull(segmentInfosSnapshot); - assertNotNull(segmentInfosSnapshot.get()); - verify(engine, 
times(1)).getLatestSegmentInfos(); - store.close(); - engine.close(); + store = createStore(); + engine = createEngine(store, createTempDir()); + List operations = generateHistoryOnReplica( + randomIntBetween(1, 100), + randomBoolean(), + randomBoolean(), + randomBoolean() + ); + for (Engine.Operation op : operations) { + applyOperation(engine, op); + } + engine.refresh("test"); + try (GatedCloseable snapshot = engine.getSegmentInfosSnapshot()) { + Collection files = snapshot.get().files(true); + Set localFiles = Set.of(store.directory().listAll()); + for (String file : files) { + assertTrue("Local directory contains file " + file, localFiles.contains(file)); + } + + engine.flush(true, true); + + try ( + final GatedCloseable snapshotAfterFlush = engine.getSegmentInfosSnapshot(); + final GatedCloseable commit = engine.acquireLastIndexCommit(false) + ) { + final SegmentInfos segmentInfos = snapshotAfterFlush.get(); + assertNotEquals(segmentInfos.getSegmentsFileName(), snapshot.get().getSegmentsFileName()); + assertEquals(commit.get().getSegmentsFileName(), segmentInfos.getSegmentsFileName()); + } + + // original files are preserved. + localFiles = Set.of(store.directory().listAll()); + for (String file : files) { + assertTrue("Local directory contains file " + file, localFiles.contains(file)); + } + } + } + + public void testGetSegmentInfosSnapshot_LatestCommitOnDiskHasHigherGenThanReader() throws Exception { + IOUtils.close(store, engine); + store = createStore(); + engine = createEngine(store, createTempDir()); + // to simulate this we need concurrent flush/refresh. + AtomicBoolean run = new AtomicBoolean(true); + AtomicInteger docId = new AtomicInteger(0); + Thread refresher = new Thread(() -> { + while (run.get()) { + try { + engine.index(indexForDoc(createParsedDoc(Integer.toString(docId.getAndIncrement()), null))); + engine.refresh("test"); + getSnapshotAndAssertFilesExistLocally(); + } catch (Exception e) { + Assert.fail(); + } + } + }); + refresher.start(); + try { + for (int i = 0; i < 10; i++) { + engine.flush(true, true); + getSnapshotAndAssertFilesExistLocally(); + } + } catch (Exception e) { + Assert.fail(); + } finally { + run.set(false); + refresher.join(); + } + } + + private void getSnapshotAndAssertFilesExistLocally() throws IOException { + try (GatedCloseable snapshot = engine.getSegmentInfosSnapshot()) { + Collection files = snapshot.get().files(true); + Set localFiles = Set.of(store.directory().listAll()); + for (String file : files) { + assertTrue("Local directory contains file " + file, localFiles.contains(file)); + } + } } public void testGetProcessedLocalCheckpoint() throws IOException { diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java index e5d26686b9002..92720056e5b53 100644 --- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -35,6 +35,7 @@ import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicLong; @@ -75,6 +76,28 @@ public void testCreateEngine() throws IOException { } } + public void testCreateEngineWithException() throws IOException { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final Store nrtEngineStore = 
+        try {
+            // Passing null translogPath to induce failure
+            final EngineConfig replicaConfig = config(
+                defaultSettings,
+                nrtEngineStore,
+                null,
+                NoMergePolicy.INSTANCE,
+                null,
+                null,
+                globalCheckpoint::get
+            );
+            new NRTReplicationEngine(replicaConfig);
+        } catch (Exception e) {
+            // Ignore as engine creation will fail
+        }
+        assertEquals(1, nrtEngineStore.refCount());
+        nrtEngineStore.close();
+    }
+
     public void testEngineWritesOpsToTranslog() throws Exception {
         final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
 
@@ -149,7 +172,7 @@ public void testUpdateSegments_replicaReceivesSISWithLowerGen() throws IOExcepti
         assertEquals(2, nrtEngine.getLastCommittedSegmentInfos().getGeneration());
 
         // commit the infos to push us to segments_3.
-        nrtEngine.commitSegmentInfos();
+        nrtEngine.flush();
         assertEquals(3, nrtEngine.getLastCommittedSegmentInfos().getGeneration());
         assertEquals(3, nrtEngine.getLatestSegmentInfos().getGeneration());
 
@@ -280,7 +303,7 @@ public void testTrimTranslogOps() throws Exception {
         }
     }
 
-    public void testCommitSegmentInfos() throws Exception {
+    public void testFlush() throws Exception {
         // This test asserts that NRTReplication#commitSegmentInfos creates a new commit point with the latest checkpoints
         // stored in user data.
         final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
@@ -301,7 +324,7 @@ public void testCommitSegmentInfos() throws Exception {
             LocalCheckpointTracker localCheckpointTracker = nrtEngine.getLocalCheckpointTracker();
             final long maxSeqNo = localCheckpointTracker.getMaxSeqNo();
             final long processedCheckpoint = localCheckpointTracker.getProcessedCheckpoint();
-            nrtEngine.commitSegmentInfos();
+            nrtEngine.flush();
 
             // ensure getLatestSegmentInfos returns an updated infos ref with correct userdata.
             final SegmentInfos latestSegmentInfos = nrtEngine.getLatestSegmentInfos();
@@ -319,6 +342,10 @@ public void testCommitSegmentInfos() throws Exception {
             userData = committedInfos.getUserData();
             assertEquals(processedCheckpoint, Long.parseLong(userData.get(LOCAL_CHECKPOINT_KEY)));
             assertEquals(maxSeqNo, Long.parseLong(userData.get(MAX_SEQ_NO)));
+
+            try (final GatedCloseable<IndexCommit> indexCommit = nrtEngine.acquireLastIndexCommit(true)) {
+                assertEquals(committedInfos.getGeneration() + 1, indexCommit.get().getGeneration());
+            }
         }
     }
 
@@ -548,6 +575,59 @@ public void testDecrefToZeroRemovesFile() throws IOException {
         }
     }
 
+    public void testCommitOnCloseThrowsException_decRefStore() throws Exception {
+        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+
+        final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory());
+        final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore, INDEX_SETTINGS);
+        List<Engine.Operation> operations = generateHistoryOnReplica(
+            randomIntBetween(1, 10),
+            randomBoolean(),
+            randomBoolean(),
+            randomBoolean()
+        );
+        indexOperations(nrtEngine, operations);
+        // wipe the nrt directory initially so we can sync with primary.
+        cleanAndCopySegmentsFromPrimary(nrtEngine);
+        final Optional<String> toDelete = Set.of(nrtEngineStore.directory().listAll()).stream().filter(f -> f.endsWith(".si")).findAny();
+        assertTrue(toDelete.isPresent());
+        nrtEngineStore.directory().deleteFile(toDelete.get());
+        assertEquals(2, nrtEngineStore.refCount());
+        nrtEngine.close();
+        assertEquals(1, nrtEngineStore.refCount());
+        assertTrue(nrtEngineStore.isMarkedCorrupted());
+        // store will throw when eventually closed, not handled here.
+        assertThrows(RuntimeException.class, nrtEngineStore::close);
+    }
+
+    public void testFlushThrowsFlushFailedExceptionOnCorruption() throws Exception {
+        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+
+        final Store nrtEngineStore = createStore(INDEX_SETTINGS, newDirectory());
+        final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore, INDEX_SETTINGS);
+        List<Engine.Operation> operations = generateHistoryOnReplica(
+            randomIntBetween(1, 10),
+            randomBoolean(),
+            randomBoolean(),
+            randomBoolean()
+        );
+        indexOperations(nrtEngine, operations);
+        // wipe the nrt directory initially so we can sync with primary.
+        cleanAndCopySegmentsFromPrimary(nrtEngine);
+        final Optional<String> toDelete = Set.of(nrtEngineStore.directory().listAll()).stream().filter(f -> f.endsWith(".si")).findAny();
+        assertTrue(toDelete.isPresent());
+        nrtEngineStore.directory().deleteFile(toDelete.get());
+        assertThrows(FlushFailedEngineException.class, nrtEngine::flush);
+        nrtEngine.close();
+        if (nrtEngineStore.isMarkedCorrupted()) {
+            assertThrows(RuntimeException.class, nrtEngineStore::close);
+        } else {
+            // With certain mock directories a NoSuchFileException is thrown which is not treated as a
+            // corruption Exception. In these cases we don't expect any issue on store close.
+            nrtEngineStore.close();
+        }
+    }
+
     private void copySegments(Collection<String> latestPrimaryFiles, Engine nrtEngine) throws IOException {
         final Store store = nrtEngine.store;
         final List<String> replicaFiles = List.of(store.directory().listAll());
diff --git a/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataImplTestCase.java b/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataImplTestCase.java
index 1ffacf98a6836..2b44e759f4ff9 100644
--- a/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataImplTestCase.java
+++ b/server/src/test/java/org/opensearch/index/fielddata/AbstractFieldDataImplTestCase.java
@@ -39,6 +39,7 @@
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.SortedNumericSortField;
 import org.apache.lucene.search.TopFieldDocs;
 import org.apache.lucene.util.BytesRef;
 import org.opensearch.core.common.Strings;
@@ -144,6 +145,27 @@ public void testSingleValueAllSet() throws Exception {
         }
     }
 
+    public void testWideSortField() throws Exception {
+        if (this instanceof NoOrdinalsStringFieldDataTests || this instanceof PagedBytesStringFieldDataTests) {
+            return; // Numeric types are not supported there.
+ } + // integer to long widening should happen + IndexFieldData indexFieldData = getForField("int", "value"); + SortField sortField = indexFieldData.wideSortField(null, MultiValueMode.MIN, null, false); + assertTrue(((SortedNumericSortField) sortField).getNumericType() == SortField.Type.LONG); + + // long to long no widening should happen + indexFieldData = getForField("long", "value"); + sortField = indexFieldData.wideSortField(null, MultiValueMode.MIN, null, false); + assertTrue(((SortedNumericSortField) sortField).getNumericType() == SortField.Type.LONG); + + // float to float no widening should happen + indexFieldData = getForField("float", "value"); + sortField = indexFieldData.wideSortField(null, MultiValueMode.MIN, null, false); + assertTrue(((SortedNumericSortField) sortField).getNumericType() == SortField.Type.FLOAT); + + } + protected abstract void fillSingleValueWithMissing() throws Exception; public void assertValues(SortedBinaryDocValues values, int docId, BytesRef... actualValues) throws IOException { diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java index 2afd6773b15d4..054d3956596af 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldMapperTests.java @@ -66,6 +66,7 @@ protected void registerParameters(ParameterChecker checker) throws IOException { checker.registerConflictCheck("index", b -> b.field("index", false)); checker.registerConflictCheck("store", b -> b.field("store", true)); checker.registerConflictCheck("format", b -> b.field("format", "yyyy-MM-dd")); + checker.registerConflictCheck("print_format", b -> b.field("print_format", "yyyy-MM-dd")); checker.registerConflictCheck("locale", b -> b.field("locale", "es")); checker.registerConflictCheck("null_value", b -> b.field("null_value", "34500000")); checker.registerUpdateCheck(b -> b.field("ignore_malformed", true), m -> assertTrue(((DateFieldMapper) m).getIgnoreMalformed())); @@ -148,7 +149,7 @@ public void testStore() throws Exception { public void testIgnoreMalformed() throws IOException { testIgnoreMalformedForValue( "2016-03-99", - "failed to parse date field [2016-03-99] with format [strict_date_optional_time||epoch_millis]" + "failed to parse date field [2016-03-99] with format [strict_date_time_no_millis||strict_date_optional_time||epoch_millis]" ); testIgnoreMalformedForValue("-2147483648", "Invalid value for Year (valid values -999999999 - 999999999): -2147483648"); testIgnoreMalformedForValue("-522000000", "long overflow"); diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java index ae4de156b1cd3..e6ce5e9f2cd84 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java @@ -108,7 +108,7 @@ public void isFieldWithinRangeTestCase(DateFieldType ft) throws IOException { w.addDocument(doc); DirectoryReader reader = DirectoryReader.open(w); - DateMathParser alternateFormat = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser(); + DateMathParser alternateFormat = DateFieldMapper.getDefaultDateTimeFormatter().toDateMathParser(); doTestIsFieldWithinQuery(ft, reader, null, null); doTestIsFieldWithinQuery(ft, reader, null, alternateFormat); doTestIsFieldWithinQuery(ft, reader, 
DateTimeZone.UTC, null); @@ -159,7 +159,7 @@ private void doTestIsFieldWithinQuery(DateFieldType ft, DirectoryReader reader, public void testValueFormat() { MappedFieldType ft = new DateFieldType("field"); - long instant = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2015-10-12T14:10:55")) + long instant = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse("2015-10-12T14:10:55")) .toInstant() .toEpochMilli(); @@ -168,14 +168,14 @@ public void testValueFormat() { assertEquals("2015", new DateFieldType("field").docValueFormat("YYYY", ZoneOffset.UTC).format(instant)); assertEquals(instant, ft.docValueFormat(null, ZoneOffset.UTC).parseLong("2015-10-12T14:10:55", false, null)); assertEquals(instant + 999, ft.docValueFormat(null, ZoneOffset.UTC).parseLong("2015-10-12T14:10:55", true, null)); - long i = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2015-10-13")).toInstant().toEpochMilli(); + long i = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse("2015-10-13")).toInstant().toEpochMilli(); assertEquals(i - 1, ft.docValueFormat(null, ZoneOffset.UTC).parseLong("2015-10-12||/d", true, null)); } public void testValueForSearch() { MappedFieldType ft = new DateFieldType("field"); String date = "2015-10-12T12:09:55.000Z"; - long instant = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(date); + long instant = DateFieldMapper.getDefaultDateTimeFormatter().parseMillis(date); assertEquals(date, ft.valueForDisplay(instant)); } @@ -206,7 +206,7 @@ public void testTermQuery() { ); MappedFieldType ft = new DateFieldType("field"); String date = "2015-10-12T14:10:55"; - long instant = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date)).toInstant().toEpochMilli(); + long instant = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date)).toInstant().toEpochMilli(); Query expected = new IndexOrDocValuesQuery( LongPoint.newRangeQuery("field", instant, instant + 999), SortedNumericDocValuesField.newSlowRangeQuery("field", instant, instant + 999) @@ -218,7 +218,7 @@ public void testTermQuery() { false, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), Resolution.MILLISECONDS, null, Collections.emptyMap() @@ -255,8 +255,8 @@ public void testRangeQuery() throws IOException { MappedFieldType ft = new DateFieldType("field"); String date1 = "2015-10-12T14:10:55"; String date2 = "2016-04-28T11:33:52"; - long instant1 = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date1)).toInstant().toEpochMilli(); - long instant2 = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date2)).toInstant().toEpochMilli() + 999; + long instant1 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date1)).toInstant().toEpochMilli(); + long instant2 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date2)).toInstant().toEpochMilli() + 999; Query expected = new IndexOrDocValuesQuery( LongPoint.newRangeQuery("field", instant1, instant2), SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2) @@ -281,7 +281,7 @@ public void testRangeQuery() throws IOException { false, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), Resolution.MILLISECONDS, null, Collections.emptyMap() @@ -327,8 +327,8 @@ public void testRangeQueryWithIndexSort() { MappedFieldType ft = new 
DateFieldType("field"); String date1 = "2015-10-12T14:10:55"; String date2 = "2016-04-28T11:33:52"; - long instant1 = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date1)).toInstant().toEpochMilli(); - long instant2 = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date2)).toInstant().toEpochMilli() + 999; + long instant1 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date1)).toInstant().toEpochMilli(); + long instant2 = DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date2)).toInstant().toEpochMilli() + 999; Query pointQuery = LongPoint.newRangeQuery("field", instant1, instant2); Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery("field", instant1, instant2); diff --git a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java index 0d8ef6784a28c..393c448330142 100644 --- a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java @@ -44,13 +44,17 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.NormsFieldExistsQuery; +import org.apache.lucene.search.Query; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.automaton.Operations; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -128,14 +132,29 @@ public void testTermsQuery() { List terms = new ArrayList<>(); terms.add(new BytesRef("foo")); terms.add(new BytesRef("bar")); - assertEquals(new TermInSetQuery("field", terms), ft.termsQuery(Arrays.asList("foo", "bar"), null)); + Query expected = new IndexOrDocValuesQuery( + new TermInSetQuery("field", terms), + new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, "field", terms) + ); + assertEquals(expected, ft.termsQuery(Arrays.asList("foo", "bar"), null)); - MappedFieldType unsearchable = new KeywordFieldType("field", false, true, Collections.emptyMap()); + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + Query expectedIndex = new TermInSetQuery("field", terms); + assertEquals(expectedIndex, onlyIndexed.termsQuery(Arrays.asList("foo", "bar"), null)); + + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + Query expectedDocValues = new TermInSetQuery(MultiTermQuery.DOC_VALUES_REWRITE, "field", terms); + assertEquals(expectedDocValues, onlyDocValues.termsQuery(Arrays.asList("foo", "bar"), null)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> unsearchable.termsQuery(Arrays.asList("foo", "bar"), null) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + 
"enabled.", + e.getMessage() + ); } public void testExistsQuery() { @@ -157,9 +176,36 @@ public void testExistsQuery() { public void testRangeQuery() { MappedFieldType ft = new KeywordFieldType("field"); + + Query indexExpected = new TermRangeQuery("field", BytesRefs.toBytesRef("foo"), BytesRefs.toBytesRef("bar"), true, false); + Query dvExpected = new TermRangeQuery( + "field", + BytesRefs.toBytesRef("foo"), + BytesRefs.toBytesRef("bar"), + true, + false, + MultiTermQuery.DOC_VALUES_REWRITE + ); + + Query expected = new IndexOrDocValuesQuery(indexExpected, dvExpected); + Query actual = ft.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC); + assertEquals(expected, actual); + + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(indexExpected, onlyIndexed.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC)); + + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + assertEquals(dvExpected, onlyDocValues.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> unsearchable.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC) + ); + assertEquals( - new TermRangeQuery("field", BytesRefs.toBytesRef("foo"), BytesRefs.toBytesRef("bar"), true, false), - ft.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC) + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() ); OpenSearchException ee = expectThrows( @@ -175,16 +221,37 @@ public void testRangeQuery() { public void testRegexpQuery() { MappedFieldType ft = new KeywordFieldType("field"); assertEquals( - new RegexpQuery(new Term("field", "foo.*")), + new IndexOrDocValuesQuery( + new RegexpQuery(new Term("field", "foo.*")), + new RegexpQuery(new Term("field", "foo.*"), 0, 0, RegexpQuery.DEFAULT_PROVIDER, 10, MultiTermQuery.DOC_VALUES_REWRITE) + ), ft.regexpQuery("foo.*", 0, 0, 10, MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC) ); - MappedFieldType unsearchable = new KeywordFieldType("field", false, true, Collections.emptyMap()); + Query indexExpected = new RegexpQuery(new Term("field", "foo.*")); + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(indexExpected, onlyIndexed.regexpQuery("foo.*", 0, 0, 10, MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC)); + + Query dvExpected = new RegexpQuery( + new Term("field", "foo.*"), + 0, + 0, + RegexpQuery.DEFAULT_PROVIDER, + 10, + MultiTermQuery.DOC_VALUES_REWRITE + ); + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + assertEquals(dvExpected, onlyDocValues.regexpQuery("foo.*", 0, 0, 10, MultiTermQuery.DOC_VALUES_REWRITE, MOCK_QSC)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> unsearchable.regexpQuery("foo.*", 0, 0, 10, null, MOCK_QSC) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); OpenSearchException ee = 
expectThrows( OpenSearchException.class, @@ -200,12 +267,26 @@ public void testFuzzyQuery() { ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MOCK_QSC) ); - MappedFieldType unsearchable = new KeywordFieldType("field", false, true, Collections.emptyMap()); + Query indexExpected = new FuzzyQuery(new Term("field", "foo"), 2, 1, 50, true); + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(indexExpected, onlyIndexed.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MOCK_QSC)); + + Query dvExpected = new FuzzyQuery(new Term("field", "foo"), 2, 1, 50, true, MultiTermQuery.DOC_VALUES_REWRITE); + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + assertEquals( + dvExpected, + onlyDocValues.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MultiTermQuery.DOC_VALUES_REWRITE, MOCK_QSC) + ); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> unsearchable.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MOCK_QSC) + () -> unsearchable.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MultiTermQuery.DOC_VALUES_REWRITE, MOCK_QSC) + ); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); OpenSearchException ee = expectThrows( OpenSearchException.class, @@ -214,6 +295,47 @@ public void testFuzzyQuery() { assertEquals("[fuzzy] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", ee.getMessage()); } + public void testWildCardQuery() { + MappedFieldType ft = new KeywordFieldType("field"); + Query expected = new IndexOrDocValuesQuery( + new WildcardQuery(new Term("field", new BytesRef("foo*"))), + new WildcardQuery( + new Term("field", new BytesRef("foo*")), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, + MultiTermQuery.DOC_VALUES_REWRITE + ) + ); + assertEquals(expected, ft.wildcardQuery("foo*", MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC)); + + Query indexExpected = new WildcardQuery(new Term("field", new BytesRef("foo*"))); + MappedFieldType onlyIndexed = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(indexExpected, onlyIndexed.wildcardQuery("foo*", MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC)); + + Query dvExpected = new WildcardQuery( + new Term("field", new BytesRef("foo*")), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT, + MultiTermQuery.DOC_VALUES_REWRITE + ); + MappedFieldType onlyDocValues = new KeywordFieldType("field", false, true, Collections.emptyMap()); + assertEquals(dvExpected, onlyDocValues.wildcardQuery("foo*", MultiTermQuery.DOC_VALUES_REWRITE, MOCK_QSC)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> unsearchable.wildcardQuery("foo*", MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC) + ); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + + OpenSearchException ee = expectThrows( + OpenSearchException.class, + () -> ft.wildcardQuery("foo*", MultiTermQuery.CONSTANT_SCORE_BLENDED_REWRITE, 
MOCK_QSC_DISALLOW_EXPENSIVE) + ); + assertEquals("[wildcard] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", ee.getMessage()); + } + public void testNormalizeQueries() { MappedFieldType ft = new KeywordFieldType("field"); assertEquals(new TermQuery(new Term("field", new BytesRef("FOO"))), ft.termQuery("FOO", null)); diff --git a/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java index f3f682697a930..9a0d34c916f5c 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NestedObjectMapperTests.java @@ -1097,8 +1097,8 @@ public void testLimitNestedDocsMultipleNestedFields() throws Exception { @Override protected boolean forbidPrivateIndexSettings() { - /** - * This is needed to force the index version with {@link IndexMetadata.SETTING_INDEX_VERSION_CREATED}. + /* + This is needed to force the index version with {@link IndexMetadata.SETTING_INDEX_VERSION_CREATED}. */ return false; } diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java index 4ebb160c07c8e..331bfb7b2ddf4 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldMapperTests.java @@ -361,7 +361,12 @@ public void testSerializeDefaults() throws Exception { // if type is date_range we check that the mapper contains the default format and locale // otherwise it should not contain a locale or format - assertTrue(got, got.contains("\"format\":\"strict_date_optional_time||epoch_millis\"") == type.equals("date_range")); + assertTrue( + got, + got.contains("\"format\":\"strict_date_time_no_millis||strict_date_optional_time||epoch_millis\"") == type.equals( + "date_range" + ) + ); assertTrue(got, got.contains("\"locale\":" + "\"" + Locale.ROOT + "\"") == type.equals("date_range")); } } diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java index 668666a53cd7c..755d77c6ae392 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java @@ -265,7 +265,9 @@ public void testDateRangeQueryUsingMappingFormat() { ); assertThat( ex.getMessage(), - containsString("failed to parse date field [2016-15-06T15:29:50+08:00] with format [strict_date_optional_time||epoch_millis]") + containsString( + "failed to parse date field [2016-15-06T15:29:50+08:00] with format [strict_date_time_no_millis||strict_date_optional_time||epoch_millis]" + ) ); // setting mapping format which is compatible with those dates diff --git a/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java index 2160ab6220866..d0f26f3026789 100644 --- a/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/BoolQueryBuilderTests.java @@ -47,10 +47,13 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; 
 import static org.opensearch.index.query.QueryBuilders.boolQuery;
 import static org.opensearch.index.query.QueryBuilders.termQuery;
@@ -456,4 +459,26 @@ public void testMustRewrite() throws IOException {
         IllegalStateException e = expectThrows(IllegalStateException.class, () -> boolQuery.toQuery(context));
         assertEquals("Rewrite first", e.getMessage());
     }
+
+    public void testVisit() {
+        BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
+        boolQueryBuilder.should(new TermQueryBuilder(TEXT_FIELD_NAME, "should"));
+        boolQueryBuilder.must(new TermQueryBuilder(TEXT_FIELD_NAME, "must1"));
+        boolQueryBuilder.must(new TermQueryBuilder(TEXT_FIELD_NAME, "must2")); // Add a second one to confirm that they both get visited
+        boolQueryBuilder.mustNot(new TermQueryBuilder(TEXT_FIELD_NAME, "mustNot"));
+        boolQueryBuilder.filter(new TermQueryBuilder(TEXT_FIELD_NAME, "filter"));
+        List<QueryBuilder> visitedQueries = new ArrayList<>();
+        boolQueryBuilder.visit(createTestVisitor(visitedQueries));
+        assertEquals(6, visitedQueries.size());
+        Set<String> set = new HashSet<>(Arrays.asList("should", "must1", "must2", "mustNot", "filter"));
+
+        for (QueryBuilder qb : visitedQueries) {
+            if (qb instanceof TermQueryBuilder) {
+                set.remove(((TermQueryBuilder) qb).value());
+            }
+        }
+
+        assertEquals(0, set.size());
+
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/query/BoostingQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/BoostingQueryBuilderTests.java
index 94ded24975be4..66a02a02d4e5b 100644
--- a/server/src/test/java/org/opensearch/index/query/BoostingQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/BoostingQueryBuilderTests.java
@@ -38,6 +38,8 @@
 import org.opensearch.test.AbstractQueryTestCase;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.hamcrest.CoreMatchers.nullValue;
@@ -153,4 +155,16 @@ public void testMustRewrite() throws IOException {
         e = expectThrows(IllegalStateException.class, () -> queryBuilder2.toQuery(context));
         assertEquals("Rewrite first", e.getMessage());
     }
+
+    public void testVisit() {
+        BoostingQueryBuilder builder = new BoostingQueryBuilder(
+            new TermQueryBuilder("unmapped_field", "value"),
+            new TermQueryBuilder(KEYWORD_FIELD_NAME, "other_value")
+        );
+
+        List<QueryBuilder> visitedQueries = new ArrayList<>();
+        builder.visit(createTestVisitor(visitedQueries));
+
+        assertEquals(3, visitedQueries.size());
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java
index 2bfe964ce7259..527413d2513d0 100644
--- a/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/ConstantScoreQueryBuilderTests.java
@@ -39,6 +39,8 @@
 import org.opensearch.test.AbstractQueryTestCase;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.hamcrest.CoreMatchers.nullValue;
@@ -133,4 +135,12 @@ public void testMustRewrite() throws IOException {
         IllegalStateException e = expectThrows(IllegalStateException.class, () -> queryBuilder.toQuery(context));
         assertEquals("Rewrite first", e.getMessage());
     }
+
+    public void testVisit() {
+        ConstantScoreQueryBuilder queryBuilder = new ConstantScoreQueryBuilder(new TermQueryBuilder("unmapped_field", "foo"));
+        List<QueryBuilder> visitorQueries =
new ArrayList<>(); + queryBuilder.visit(createTestVisitor(visitorQueries)); + + assertEquals(2, visitorQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/DisMaxQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/DisMaxQueryBuilderTests.java index 8d589bd76f2bb..cb0df38de5c02 100644 --- a/server/src/test/java/org/opensearch/index/query/DisMaxQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/DisMaxQueryBuilderTests.java @@ -41,6 +41,7 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; @@ -156,4 +157,14 @@ public void testRewriteMultipleTimes() throws IOException { assertEquals(rewrittenAgain, expected); assertEquals(Rewriteable.rewrite(dismax, createShardContext()), expected); } + + public void testVisit() { + DisMaxQueryBuilder dismax = new DisMaxQueryBuilder(); + dismax.add(new WrapperQueryBuilder(new WrapperQueryBuilder(new MatchAllQueryBuilder().toString()).toString())); + + List visitedQueries = new ArrayList<>(); + dismax.visit(createTestVisitor(visitedQueries)); + + assertEquals(2, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilderTests.java index 7e893a0947531..db0a7bf1795ff 100644 --- a/server/src/test/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilderTests.java @@ -41,6 +41,8 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.opensearch.index.query.FieldMaskingSpanQueryBuilder.SPAN_FIELD_MASKING_FIELD; import static org.hamcrest.CoreMatchers.equalTo; @@ -147,4 +149,10 @@ public void testDeprecatedName() throws IOException { "Deprecated field [field_masking_span] used, expected [" + SPAN_FIELD_MASKING_FIELD.getPreferredName() + "] instead" ); } + + public void testVisit() { + List visitedQueries = new ArrayList<>(); + doCreateTestQueryBuilder().visit(createTestVisitor(visitedQueries)); + assertEquals(2, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java b/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java index 729205c9775b4..7c7598e5dc3c4 100644 --- a/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/InnerHitBuilderTests.java @@ -109,7 +109,7 @@ public void testSerialization() throws Exception { /** * Test that if we serialize and deserialize an object, further * serialization leads to identical bytes representation. - * + *
+     * <p>
            * This is necessary to ensure because we use the serialized BytesReference * of this builder as part of the cacheKey in * {@link ShardSearchRequest} (via diff --git a/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java index e1391393f44fa..39f5bb313fe9e 100644 --- a/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java @@ -304,10 +304,16 @@ public void testToQueryBooleanPrefixMultipleFields() throws IOException { } else if (disjunct instanceof PrefixQuery) { final PrefixQuery secondDisjunct = (PrefixQuery) disjunct; assertThat(secondDisjunct.getPrefix(), equalTo(new Term(KEYWORD_FIELD_NAME, "foo bar"))); + } else if (disjunct instanceof IndexOrDocValuesQuery) { + final IndexOrDocValuesQuery iodvqDisjunct = (IndexOrDocValuesQuery) disjunct; + assertThat(iodvqDisjunct.getIndexQuery().toString(), equalTo("mapped_string_2:foo bar*")); } else { throw new AssertionError(); } - assertThat(disjunct, either(instanceOf(BooleanQuery.class)).or(instanceOf(PrefixQuery.class))); + assertThat( + disjunct, + either(instanceOf(BooleanQuery.class)).or(instanceOf(PrefixQuery.class)).or(instanceOf(IndexOrDocValuesQuery.class)) + ); } } } diff --git a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java index 8928a7c3e6203..ff6eb74f5443e 100644 --- a/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/NestedQueryBuilderTests.java @@ -66,6 +66,8 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class NestedQueryBuilderTests extends AbstractQueryTestCase { @@ -407,4 +409,22 @@ public void testDisallowExpensiveQueries() { OpenSearchException e = expectThrows(OpenSearchException.class, () -> queryBuilder.toQuery(queryShardContext)); assertEquals("[joining] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", e.getMessage()); } + + public void testSetParentFilterInContext() throws Exception { + QueryShardContext queryShardContext = createShardContext(); + QueryBuilder innerQueryBuilder = spy(new MatchAllQueryBuilderTests().createTestQueryBuilder()); + when(innerQueryBuilder.toQuery(queryShardContext)).thenAnswer(invoke -> { + QueryShardContext context = invoke.getArgument(0); + if (context.getParentFilter() == null) { + throw new Exception("Expect parent filter to be non-null"); + } + return invoke.callRealMethod(); + }); + NestedQueryBuilder nqb = new NestedQueryBuilder("nested1", innerQueryBuilder, RandomPicks.randomFrom(random(), ScoreMode.values())); + + assertNull(queryShardContext.getParentFilter()); + nqb.rewrite(queryShardContext).toQuery(queryShardContext); + assertNull(queryShardContext.getParentFilter()); + verify(innerQueryBuilder).toQuery(queryShardContext); + } } diff --git a/server/src/test/java/org/opensearch/index/query/QueryBuilderVisitorTests.java b/server/src/test/java/org/opensearch/index/query/QueryBuilderVisitorTests.java new file mode 100644 index 0000000000000..7849d3985ca59 --- /dev/null +++ 
b/server/src/test/java/org/opensearch/index/query/QueryBuilderVisitorTests.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +import org.apache.lucene.search.BooleanClause; +import org.opensearch.test.AbstractBuilderTestCase; + +import java.util.ArrayList; +import java.util.List; + +public class QueryBuilderVisitorTests extends AbstractBuilderTestCase { + + public void testNoOpsVisitor() { + BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder(); + + List visitedQueries = new ArrayList<>(); + QueryBuilderVisitor qbv = createTestVisitor(visitedQueries); + boolQueryBuilder.visit(qbv); + QueryBuilderVisitor subQbv = qbv.getChildVisitor(BooleanClause.Occur.MUST_NOT); + assertEquals(0, visitedQueries.size()); + assertEquals(qbv, subQbv); + } + + protected static QueryBuilderVisitor createTestVisitor(List visitedQueries) { + return QueryBuilderVisitor.NO_OP_VISITOR; + } +} diff --git a/server/src/test/java/org/opensearch/index/query/QueryShapeVisitorTests.java b/server/src/test/java/org/opensearch/index/query/QueryShapeVisitorTests.java new file mode 100644 index 0000000000000..18b814aec61c2 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/query/QueryShapeVisitorTests.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +import org.opensearch.test.OpenSearchTestCase; + +import static org.junit.Assert.assertEquals; + +public final class QueryShapeVisitorTests extends OpenSearchTestCase { + public void testQueryShapeVisitor() { + QueryBuilder builder = new BoolQueryBuilder().must(new TermQueryBuilder("foo", "bar")) + .filter(new ConstantScoreQueryBuilder(new RangeQueryBuilder("timestamp").from("12345677").to("2345678"))) + .should( + new BoolQueryBuilder().must(new MatchQueryBuilder("text", "this is some text")) + .mustNot(new RegexpQueryBuilder("color", "red.*")) + ) + .must(new TermsQueryBuilder("genre", "action", "drama", "romance")); + QueryShapeVisitor shapeVisitor = new QueryShapeVisitor(); + builder.visit(shapeVisitor); + assertEquals( + "{\"type\":\"bool\",\"must\"[{\"type\":\"term\"},{\"type\":\"terms\"}],\"filter\"[{\"type\":\"constant_score\",\"filter\"[{\"type\":\"range\"}]}],\"should\"[{\"type\":\"bool\",\"must\"[{\"type\":\"match\"}],\"must_not\"[{\"type\":\"regexp\"}]}]}", + shapeVisitor.toJson() + ); + } +} diff --git a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java index 2ac5ea8c585ea..64b3eea029bd1 100644 --- a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java @@ -87,8 +87,8 @@ protected RangeQueryBuilder doCreateTestQueryBuilder() { ZonedDateTime start = now.minusMillis(randomIntBetween(0, 1000000)).atZone(ZoneOffset.UTC); ZonedDateTime end = now.plusMillis(randomIntBetween(0, 1000000)).atZone(ZoneOffset.UTC); query = new RangeQueryBuilder(randomFrom(DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, DATE_ALIAS_FIELD_NAME)); - query.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(start)); - 
query.to(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(end)); + query.from(DateFieldMapper.getDefaultDateTimeFormatter().format(start)); + query.to(DateFieldMapper.getDefaultDateTimeFormatter().format(end)); // Create timestamp option only then we have a date mapper, // otherwise we could trigger exception. if (createShardContext().getMapperService().fieldType(DATE_FIELD_NAME) != null) { diff --git a/server/src/test/java/org/opensearch/index/query/ScriptScoreQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/ScriptScoreQueryBuilderTests.java index 320ff20b2ef5d..29d45fb42ffcc 100644 --- a/server/src/test/java/org/opensearch/index/query/ScriptScoreQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/ScriptScoreQueryBuilderTests.java @@ -43,7 +43,9 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; +import java.util.List; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.hamcrest.CoreMatchers.instanceOf; @@ -140,4 +142,11 @@ public void testDisallowExpensiveQueries() { OpenSearchException e = expectThrows(OpenSearchException.class, () -> queryBuilder.toQuery(queryShardContext)); assertEquals("[script score] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", e.getMessage()); } + + public void testVisit() { + ScriptScoreQueryBuilder scriptQueryBuilder = doCreateTestQueryBuilder(); + List visitedQueries = new ArrayList<>(); + scriptQueryBuilder.visit(createTestVisitor(visitedQueries)); + assertEquals(2, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/SpanContainingQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanContainingQueryBuilderTests.java index fff4369e155b3..2a5441b0ea932 100644 --- a/server/src/test/java/org/opensearch/index/query/SpanContainingQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SpanContainingQueryBuilderTests.java @@ -38,6 +38,8 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; @@ -192,4 +194,10 @@ public void testFromJsonWithNonDefaultBoostInLittleQuery() { equalTo("span_containing [little] as a nested span clause can't have non-default boost value [2.0]") ); } + + public void testVisit() { + List visitorQueries = new ArrayList<>(); + doCreateTestQueryBuilder().visit(createTestVisitor(visitorQueries)); + assertEquals(3, visitorQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/SpanFirstQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanFirstQueryBuilderTests.java index 781d2defcfd37..82bb77726b037 100644 --- a/server/src/test/java/org/opensearch/index/query/SpanFirstQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SpanFirstQueryBuilderTests.java @@ -40,6 +40,8 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.opensearch.index.query.QueryBuilders.spanTermQuery; import static org.hamcrest.CoreMatchers.equalTo; @@ -128,4 +130,10 @@ public void testFromJsonWithNonDefaultBoostInMatchQuery() { Exception exception = expectThrows(ParsingException.class, () -> parseQuery(json)); 
assertThat(exception.getMessage(), equalTo("span_first [match] as a nested span clause can't have non-default boost value [2.0]")); } + + public void testVisit() { + List visitorQueries = new ArrayList<>(); + doCreateTestQueryBuilder().visit(createTestVisitor(visitorQueries)); + assertEquals(2, visitorQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java index e20d56c6aff2a..b4abff118802e 100644 --- a/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SpanMultiTermQueryBuilderTests.java @@ -60,6 +60,8 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static java.util.Collections.singleton; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -306,4 +308,12 @@ public void testTopNMultiTermsRewriteInsideSpan() throws Exception { } } + + public void testVisit() { + MultiTermQueryBuilder multiTermQueryBuilder = new PrefixQueryBuilderTests().createTestQueryBuilder(); + SpanMultiTermQueryBuilder spanMultiTermQueryBuilder = new SpanMultiTermQueryBuilder(multiTermQueryBuilder); + List visitorQueries = new ArrayList<>(); + spanMultiTermQueryBuilder.visit(createTestVisitor(visitorQueries)); + assertEquals(2, visitorQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/SpanNearQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanNearQueryBuilderTests.java index 8c9130d4b7bbd..c97b541da2199 100644 --- a/server/src/test/java/org/opensearch/index/query/SpanNearQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SpanNearQueryBuilderTests.java @@ -41,7 +41,9 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Iterator; +import java.util.List; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.either; @@ -231,4 +233,12 @@ public void testFromJsonWithNonDefaultBoostInInnerQuery() { Exception exception = expectThrows(ParsingException.class, () -> parseQuery(json)); assertThat(exception.getMessage(), equalTo("span_near [clauses] as a nested span clause can't have non-default boost value [2.0]")); } + + public void testVisit() { + SpanTermQueryBuilder[] spanTermQueries = new SpanTermQueryBuilderTests().createSpanTermQueryBuilders(1); + SpanNearQueryBuilder spanNearQueryBuilder = new SpanNearQueryBuilder(spanTermQueries[0], 1); + List visitorQueries = new ArrayList<>(); + spanNearQueryBuilder.visit(createTestVisitor(visitorQueries)); + assertEquals(2, visitorQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/SpanNotQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanNotQueryBuilderTests.java index b85c7cf81b0e1..ff42b271ffca5 100644 --- a/server/src/test/java/org/opensearch/index/query/SpanNotQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SpanNotQueryBuilderTests.java @@ -40,6 +40,8 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.opensearch.index.query.QueryBuilders.spanNearQuery; import static org.opensearch.index.query.QueryBuilders.spanTermQuery; @@ -316,4 +318,10 @@ public 
void testFromJsonWithNonDefaultBoostInExcludeQuery() { Exception exception = expectThrows(ParsingException.class, () -> parseQuery(json)); assertThat(exception.getMessage(), equalTo("span_not [exclude] as a nested span clause can't have non-default boost value [2.0]")); } + + public void testVisit() { + List visitedQueries = new ArrayList<>(); + doCreateTestQueryBuilder().visit(createTestVisitor(visitedQueries)); + assertEquals(3, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/SpanOrQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanOrQueryBuilderTests.java index 45323b5df74df..eb4b8fd486cb0 100644 --- a/server/src/test/java/org/opensearch/index/query/SpanOrQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SpanOrQueryBuilderTests.java @@ -39,7 +39,9 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Iterator; +import java.util.List; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; @@ -137,4 +139,12 @@ public void testFromJsonWithNonDefaultBoostInInnerQuery() { Exception exception = expectThrows(ParsingException.class, () -> parseQuery(json)); assertThat(exception.getMessage(), equalTo("span_or [clauses] as a nested span clause can't have non-default boost value [2.0]")); } + + public void testVisit() { + SpanTermQueryBuilder spanTermQueryBuilder = new SpanTermQueryBuilder("demo", "demo"); + SpanOrQueryBuilder spanOrQueryBuilder = new SpanOrQueryBuilder(spanTermQueryBuilder); + List visitedQueries = new ArrayList<>(); + spanOrQueryBuilder.visit(createTestVisitor(visitedQueries)); + assertEquals(2, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/query/SpanWithinQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SpanWithinQueryBuilderTests.java index e8b6a21254ff8..74b955e872d51 100644 --- a/server/src/test/java/org/opensearch/index/query/SpanWithinQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SpanWithinQueryBuilderTests.java @@ -38,6 +38,8 @@ import org.opensearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; @@ -189,4 +191,10 @@ public void testFromJsonWithNonDefaultBoostInLittleQuery() { equalTo("span_within [little] as a nested span clause can't have non-default boost value [2.0]") ); } + + public void testVisit() { + List visitedQueries = new ArrayList<>(); + doCreateTestQueryBuilder().visit(createTestVisitor(visitedQueries)); + assertEquals(3, visitedQueries.size()); + } } diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java index 94934d5b4dca6..c87cdfcc8f1a1 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java @@ -23,19 +23,17 @@ import java.util.HashMap; import java.util.Map; -import static org.mockito.Mockito.mock; +import static org.opensearch.index.remote.RemoteSegmentTransferTracker.currentTimeMsUsingSystemNanos; public class RemoteSegmentTransferTrackerTests extends OpenSearchTestCase { - - private RemoteStorePressureSettings 
pressureSettings; - + private RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; private ClusterService clusterService; private ThreadPool threadPool; private ShardId shardId; - private RemoteSegmentTransferTracker pressureTracker; + private RemoteSegmentTransferTracker transferTracker; private DirectoryFileTransferTracker directoryFileTransferTracker; @@ -48,7 +46,7 @@ public void setUp() throws Exception { new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool ); - pressureSettings = new RemoteStorePressureSettings(clusterService, Settings.EMPTY, mock(RemoteStorePressureService.class)); + remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, Settings.EMPTY); shardId = new ShardId("index", "uuid", 0); directoryFileTransferTracker = new DirectoryFileTransferTracker(); } @@ -60,513 +58,493 @@ public void tearDown() throws Exception { } public void testGetShardId() { - pressureTracker = new RemoteSegmentTransferTracker( + transferTracker = new RemoteSegmentTransferTracker( shardId, directoryFileTransferTracker, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() ); - assertEquals(shardId, pressureTracker.getShardId()); + assertEquals(shardId, transferTracker.getShardId()); } public void testUpdateLocalRefreshSeqNo() { - pressureTracker = new RemoteSegmentTransferTracker( + transferTracker = new RemoteSegmentTransferTracker( shardId, directoryFileTransferTracker, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() ); long refreshSeqNo = 2; - pressureTracker.updateLocalRefreshSeqNo(refreshSeqNo); - assertEquals(refreshSeqNo, pressureTracker.getLocalRefreshSeqNo()); + transferTracker.updateLocalRefreshSeqNo(refreshSeqNo); + assertEquals(refreshSeqNo, transferTracker.getLocalRefreshSeqNo()); } public void testUpdateRemoteRefreshSeqNo() { - pressureTracker = new RemoteSegmentTransferTracker( + transferTracker = new RemoteSegmentTransferTracker( shardId, directoryFileTransferTracker, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() ); long refreshSeqNo = 4; - pressureTracker.updateRemoteRefreshSeqNo(refreshSeqNo); - assertEquals(refreshSeqNo, pressureTracker.getRemoteRefreshSeqNo()); + transferTracker.updateRemoteRefreshSeqNo(refreshSeqNo); + assertEquals(refreshSeqNo, transferTracker.getRemoteRefreshSeqNo()); } public void testUpdateLocalRefreshTimeMs() { - pressureTracker = new RemoteSegmentTransferTracker( + transferTracker = new RemoteSegmentTransferTracker( shardId, directoryFileTransferTracker, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() ); - long refreshTimeMs = System.nanoTime() / 1_000_000L + randomIntBetween(10, 100); - pressureTracker.updateLocalRefreshTimeMs(refreshTimeMs); - assertEquals(refreshTimeMs, 
pressureTracker.getLocalRefreshTimeMs()); + long refreshTimeMs = currentTimeMsUsingSystemNanos() + randomIntBetween(10, 100); + transferTracker.updateLocalRefreshTimeMs(refreshTimeMs); + assertEquals(refreshTimeMs, transferTracker.getLocalRefreshTimeMs()); } public void testUpdateRemoteRefreshTimeMs() { - pressureTracker = new RemoteSegmentTransferTracker( + transferTracker = new RemoteSegmentTransferTracker( shardId, directoryFileTransferTracker, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() ); - long refreshTimeMs = System.nanoTime() / 1_000_000 + randomIntBetween(10, 100); - pressureTracker.updateRemoteRefreshTimeMs(refreshTimeMs); - assertEquals(refreshTimeMs, pressureTracker.getRemoteRefreshTimeMs()); + long refreshTimeMs = currentTimeMsUsingSystemNanos() + randomIntBetween(10, 100); + transferTracker.updateRemoteRefreshTimeMs(refreshTimeMs); + assertEquals(refreshTimeMs, transferTracker.getRemoteRefreshTimeMs()); } public void testLastDownloadTimestampMs() { - pressureTracker = new RemoteSegmentTransferTracker( + transferTracker = new RemoteSegmentTransferTracker( shardId, directoryFileTransferTracker, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() ); long currentTimeInMs = System.currentTimeMillis(); - pressureTracker.getDirectoryFileTransferTracker().updateLastTransferTimestampMs(currentTimeInMs); - assertEquals(currentTimeInMs, pressureTracker.getDirectoryFileTransferTracker().getLastTransferTimestampMs()); + transferTracker.getDirectoryFileTransferTracker().updateLastTransferTimestampMs(currentTimeInMs); + assertEquals(currentTimeInMs, transferTracker.getDirectoryFileTransferTracker().getLastTransferTimestampMs()); } public void testComputeSeqNoLagOnUpdate() { - pressureTracker = new RemoteSegmentTransferTracker( + transferTracker = new RemoteSegmentTransferTracker( shardId, directoryFileTransferTracker, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), - pressureSettings.getUploadTimeMovingAverageWindowSize() + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() ); int localRefreshSeqNo = randomIntBetween(50, 100); int remoteRefreshSeqNo = randomIntBetween(20, 50); - pressureTracker.updateLocalRefreshSeqNo(localRefreshSeqNo); - assertEquals(localRefreshSeqNo, pressureTracker.getRefreshSeqNoLag()); - pressureTracker.updateRemoteRefreshSeqNo(remoteRefreshSeqNo); - assertEquals(localRefreshSeqNo - remoteRefreshSeqNo, pressureTracker.getRefreshSeqNoLag()); + transferTracker.updateLocalRefreshSeqNo(localRefreshSeqNo); + assertEquals(localRefreshSeqNo, transferTracker.getRefreshSeqNoLag()); + transferTracker.updateRemoteRefreshSeqNo(remoteRefreshSeqNo); + assertEquals(localRefreshSeqNo - remoteRefreshSeqNo, transferTracker.getRefreshSeqNoLag()); } - public void testComputeTimeLagOnUpdate() { - pressureTracker = new RemoteSegmentTransferTracker( + public void testComputeTimeLagOnUpdate() throws InterruptedException { + transferTracker = new RemoteSegmentTransferTracker( shardId, directoryFileTransferTracker, - pressureSettings.getUploadBytesMovingAverageWindowSize(), - 
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );
-        long currentLocalRefreshTimeMs = pressureTracker.getLocalRefreshTimeMs();
-        long currentTimeMs = System.nanoTime() / 1_000_000L;
-        long localRefreshTimeMs = currentTimeMs + randomIntBetween(100, 500);
-        long remoteRefreshTimeMs = currentTimeMs + randomIntBetween(50, 99);
-        pressureTracker.updateLocalRefreshTimeMs(localRefreshTimeMs);
-        assertEquals(localRefreshTimeMs - currentLocalRefreshTimeMs, pressureTracker.getTimeMsLag());
-        pressureTracker.updateRemoteRefreshTimeMs(remoteRefreshTimeMs);
-        assertEquals(localRefreshTimeMs - remoteRefreshTimeMs, pressureTracker.getTimeMsLag());
+
+        // No lag if there is a remote upload corresponding to a local refresh
+        assertEquals(0, transferTracker.getTimeMsLag());
+
+        // Set a local refresh time that is higher than remote refresh time
+        Thread.sleep(1);
+        transferTracker.updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos());
+
+        // Sleep for 100ms and then the lag should be within 100ms +/- 20ms
+        Thread.sleep(100);
+        assertTrue(Math.abs(transferTracker.getTimeMsLag() - 100) <= 20);
+
+        transferTracker.updateRemoteRefreshTimeMs(transferTracker.getLocalRefreshTimeMs());
+        transferTracker.updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos());
+        long random = randomIntBetween(50, 200);
+        Thread.sleep(random);
+        assertTrue(Math.abs(transferTracker.getTimeMsLag() - random) <= 20);
     }

     public void testAddUploadBytesStarted() {
-        pressureTracker = new RemoteSegmentTransferTracker(
+        transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );
         long bytesToAdd = randomLongBetween(1000, 1000000);
-        pressureTracker.addUploadBytesStarted(bytesToAdd);
-        assertEquals(bytesToAdd, pressureTracker.getUploadBytesStarted());
+        transferTracker.addUploadBytesStarted(bytesToAdd);
+        assertEquals(bytesToAdd, transferTracker.getUploadBytesStarted());
         long moreBytesToAdd = randomLongBetween(1000, 10000);
-        pressureTracker.addUploadBytesStarted(moreBytesToAdd);
-        assertEquals(bytesToAdd + moreBytesToAdd, pressureTracker.getUploadBytesStarted());
+        transferTracker.addUploadBytesStarted(moreBytesToAdd);
+        assertEquals(bytesToAdd + moreBytesToAdd, transferTracker.getUploadBytesStarted());
     }

     public void testAddUploadBytesFailed() {
-        pressureTracker = new RemoteSegmentTransferTracker(
+        transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );
         long bytesToAdd = randomLongBetween(1000, 1000000);
-        pressureTracker.addUploadBytesFailed(bytesToAdd);
-        assertEquals(bytesToAdd, pressureTracker.getUploadBytesFailed());
         long moreBytesToAdd = randomLongBetween(1000, 10000);
-        pressureTracker.addUploadBytesFailed(moreBytesToAdd);
-        assertEquals(bytesToAdd + moreBytesToAdd, pressureTracker.getUploadBytesFailed());
+        transferTracker.addUploadBytesStarted(bytesToAdd + moreBytesToAdd);
+        transferTracker.addUploadBytesFailed(bytesToAdd);
+        assertEquals(bytesToAdd, transferTracker.getUploadBytesFailed());
+        transferTracker.addUploadBytesFailed(moreBytesToAdd);
+        assertEquals(bytesToAdd + moreBytesToAdd, transferTracker.getUploadBytesFailed());
     }

     public void testAddUploadBytesSucceeded() {
-        pressureTracker = new RemoteSegmentTransferTracker(
+        transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );
         long bytesToAdd = randomLongBetween(1000, 1000000);
-        pressureTracker.addUploadBytesSucceeded(bytesToAdd);
-        assertEquals(bytesToAdd, pressureTracker.getUploadBytesSucceeded());
         long moreBytesToAdd = randomLongBetween(1000, 10000);
-        pressureTracker.addUploadBytesSucceeded(moreBytesToAdd);
-        assertEquals(bytesToAdd + moreBytesToAdd, pressureTracker.getUploadBytesSucceeded());
+        transferTracker.addUploadBytesStarted(bytesToAdd + moreBytesToAdd);
+        transferTracker.addUploadBytesSucceeded(bytesToAdd);
+        assertEquals(bytesToAdd, transferTracker.getUploadBytesSucceeded());
+        transferTracker.addUploadBytesSucceeded(moreBytesToAdd);
+        assertEquals(bytesToAdd + moreBytesToAdd, transferTracker.getUploadBytesSucceeded());
     }

     public void testAddDownloadBytesStarted() {
-        pressureTracker = new RemoteSegmentTransferTracker(
+        transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );
         long bytesToAdd = randomLongBetween(1000, 1000000);
-        pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesStarted(bytesToAdd);
-        assertEquals(bytesToAdd, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesStarted());
+        transferTracker.getDirectoryFileTransferTracker().addTransferredBytesStarted(bytesToAdd);
+        assertEquals(bytesToAdd, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesStarted());
         long moreBytesToAdd = randomLongBetween(1000, 10000);
-        pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesStarted(moreBytesToAdd);
-        assertEquals(bytesToAdd + moreBytesToAdd, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesStarted());
+        transferTracker.getDirectoryFileTransferTracker().addTransferredBytesStarted(moreBytesToAdd);
+        assertEquals(bytesToAdd + moreBytesToAdd, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesStarted());
     }

     public void testAddDownloadBytesFailed() {
-        pressureTracker = new RemoteSegmentTransferTracker(
+        transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );
         long bytesToAdd = randomLongBetween(1000, 1000000);
-        pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesFailed(bytesToAdd);
-        assertEquals(bytesToAdd, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesFailed());
+        transferTracker.getDirectoryFileTransferTracker().addTransferredBytesFailed(bytesToAdd, System.currentTimeMillis());
+        assertEquals(bytesToAdd, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesFailed());
         long moreBytesToAdd = randomLongBetween(1000, 10000);
-        pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesFailed(moreBytesToAdd);
-        assertEquals(bytesToAdd + moreBytesToAdd, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesFailed());
+        transferTracker.getDirectoryFileTransferTracker().addTransferredBytesFailed(moreBytesToAdd, System.currentTimeMillis());
+        assertEquals(bytesToAdd + moreBytesToAdd, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesFailed());
     }

     public void testAddDownloadBytesSucceeded() {
-        pressureTracker = new RemoteSegmentTransferTracker(
+        transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );
         long bytesToAdd = randomLongBetween(1000, 1000000);
-        pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesSucceeded(bytesToAdd, System.currentTimeMillis());
-        assertEquals(bytesToAdd, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesSucceeded());
+        transferTracker.getDirectoryFileTransferTracker().addTransferredBytesSucceeded(bytesToAdd, System.currentTimeMillis());
+        assertEquals(bytesToAdd, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesSucceeded());
         long moreBytesToAdd = randomLongBetween(1000, 10000);
-        pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesSucceeded(moreBytesToAdd, System.currentTimeMillis());
-        assertEquals(bytesToAdd + moreBytesToAdd, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesSucceeded());
+        transferTracker.getDirectoryFileTransferTracker().addTransferredBytesSucceeded(moreBytesToAdd, System.currentTimeMillis());
+        assertEquals(bytesToAdd + moreBytesToAdd, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesSucceeded());
     }

     public void testGetInflightUploadBytes() {
-        pressureTracker = new RemoteSegmentTransferTracker(
+        transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );
         long bytesStarted = randomLongBetween(10000, 100000);
         long bytesSucceeded = randomLongBetween(1000, 10000);
         long bytesFailed = randomLongBetween(100, 1000);
-        pressureTracker.addUploadBytesStarted(bytesStarted);
-        pressureTracker.addUploadBytesSucceeded(bytesSucceeded);
-        pressureTracker.addUploadBytesFailed(bytesFailed);
-        assertEquals(bytesStarted - bytesSucceeded - bytesFailed, pressureTracker.getInflightUploadBytes());
+        transferTracker.addUploadBytesStarted(bytesStarted);
+        transferTracker.addUploadBytesSucceeded(bytesSucceeded);
+        transferTracker.addUploadBytesFailed(bytesFailed);
+        assertEquals(bytesStarted - bytesSucceeded - bytesFailed, transferTracker.getInflightUploadBytes());
     }

     public void testIncrementTotalUploadsStarted() {
-        pressureTracker = new RemoteSegmentTransferTracker(
+        transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );
-        pressureTracker.incrementTotalUploadsStarted();
-        assertEquals(1, pressureTracker.getTotalUploadsStarted());
-        pressureTracker.incrementTotalUploadsStarted();
-        assertEquals(2, pressureTracker.getTotalUploadsStarted());
+        transferTracker.incrementTotalUploadsStarted();
+        assertEquals(1, transferTracker.getTotalUploadsStarted());
+        transferTracker.incrementTotalUploadsStarted();
+        assertEquals(2, transferTracker.getTotalUploadsStarted());
     }

     public void testIncrementTotalUploadsFailed() {
-        pressureTracker = new RemoteSegmentTransferTracker(
+        transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
        );
-        pressureTracker.incrementTotalUploadsFailed();
-        assertEquals(1, pressureTracker.getTotalUploadsFailed());
-        pressureTracker.incrementTotalUploadsFailed();
-        assertEquals(2, pressureTracker.getTotalUploadsFailed());
+        transferTracker.incrementTotalUploadsStarted();
+        transferTracker.incrementTotalUploadsStarted();
+        transferTracker.incrementTotalUploadsFailed();
+        assertEquals(1, transferTracker.getTotalUploadsFailed());
+        transferTracker.incrementTotalUploadsFailed();
+        assertEquals(2, transferTracker.getTotalUploadsFailed());
     }

     public void testIncrementTotalUploadSucceeded() {
-        pressureTracker = new RemoteSegmentTransferTracker(
+        transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );
-        pressureTracker.incrementTotalUploadsSucceeded();
-        assertEquals(1, pressureTracker.getTotalUploadsSucceeded());
-        pressureTracker.incrementTotalUploadsSucceeded();
-        assertEquals(2, pressureTracker.getTotalUploadsSucceeded());
+        transferTracker.incrementTotalUploadsStarted();
+        transferTracker.incrementTotalUploadsStarted();
+        transferTracker.incrementTotalUploadsSucceeded();
+        assertEquals(1, transferTracker.getTotalUploadsSucceeded());
+        transferTracker.incrementTotalUploadsSucceeded();
+        assertEquals(2, transferTracker.getTotalUploadsSucceeded());
     }

     public void testGetInflightUploads() {
-        pressureTracker = new RemoteSegmentTransferTracker(
+        transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );
-        pressureTracker.incrementTotalUploadsStarted();
-        assertEquals(1, pressureTracker.getInflightUploads());
-        pressureTracker.incrementTotalUploadsStarted();
-        assertEquals(2, pressureTracker.getInflightUploads());
-        pressureTracker.incrementTotalUploadsSucceeded();
-        assertEquals(1, pressureTracker.getInflightUploads());
-        pressureTracker.incrementTotalUploadsFailed();
-        assertEquals(0, pressureTracker.getInflightUploads());
+        transferTracker.incrementTotalUploadsStarted();
+        assertEquals(1, transferTracker.getInflightUploads());
+        transferTracker.incrementTotalUploadsStarted();
+        assertEquals(2, transferTracker.getInflightUploads());
+        transferTracker.incrementTotalUploadsSucceeded();
+        assertEquals(1, transferTracker.getInflightUploads());
+        transferTracker.incrementTotalUploadsFailed();
+        assertEquals(0, transferTracker.getInflightUploads());
     }

     public void testIncrementRejectionCount() {
-        pressureTracker = new RemoteSegmentTransferTracker(
+        transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );
-        pressureTracker.incrementRejectionCount();
-        assertEquals(1, pressureTracker.getRejectionCount());
-        pressureTracker.incrementRejectionCount();
-        assertEquals(2, pressureTracker.getRejectionCount());
+        transferTracker.incrementRejectionCount();
+        assertEquals(1, transferTracker.getRejectionCount());
+        transferTracker.incrementRejectionCount();
+        assertEquals(2, transferTracker.getRejectionCount());
     }

     public void testGetConsecutiveFailureCount() {
-        pressureTracker = new RemoteSegmentTransferTracker(
+        transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );
-        pressureTracker.incrementTotalUploadsFailed();
-        assertEquals(1, pressureTracker.getConsecutiveFailureCount());
-        pressureTracker.incrementTotalUploadsFailed();
-        assertEquals(2, pressureTracker.getConsecutiveFailureCount());
-        pressureTracker.incrementTotalUploadsSucceeded();
-        assertEquals(0, pressureTracker.getConsecutiveFailureCount());
+        transferTracker.incrementTotalUploadsStarted();
+        transferTracker.incrementTotalUploadsStarted();
+        transferTracker.incrementTotalUploadsStarted();
+        transferTracker.incrementTotalUploadsFailed();
+        assertEquals(1, transferTracker.getConsecutiveFailureCount());
+        transferTracker.incrementTotalUploadsFailed();
+        assertEquals(2, transferTracker.getConsecutiveFailureCount());
+        transferTracker.incrementTotalUploadsSucceeded();
+        assertEquals(0, transferTracker.getConsecutiveFailureCount());
     }

     public void testComputeBytesLag() {
-        pressureTracker = new RemoteSegmentTransferTracker(
+        transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );

         // Create local file size map
         Map<String, Long> fileSizeMap = new HashMap<>();
         fileSizeMap.put("a", 100L);
         fileSizeMap.put("b", 105L);
-        pressureTracker.updateLatestLocalFileNameLengthMap(fileSizeMap.keySet(), fileSizeMap::get);
-        assertEquals(205L, pressureTracker.getBytesLag());
+        transferTracker.updateLatestLocalFileNameLengthMap(fileSizeMap.keySet(), fileSizeMap::get);
+        assertEquals(205L, transferTracker.getBytesLag());

-        pressureTracker.addToLatestUploadedFiles("a");
-        assertEquals(105L, pressureTracker.getBytesLag());
+        transferTracker.addToLatestUploadedFiles("a");
+        assertEquals(105L, transferTracker.getBytesLag());

         fileSizeMap.put("c", 115L);
-        pressureTracker.updateLatestLocalFileNameLengthMap(fileSizeMap.keySet(), fileSizeMap::get);
-        assertEquals(220L, pressureTracker.getBytesLag());
+        transferTracker.updateLatestLocalFileNameLengthMap(fileSizeMap.keySet(), fileSizeMap::get);
+        assertEquals(220L, transferTracker.getBytesLag());

-        pressureTracker.addToLatestUploadedFiles("b");
-        assertEquals(115L, pressureTracker.getBytesLag());
+        transferTracker.addToLatestUploadedFiles("b");
+        assertEquals(115L, transferTracker.getBytesLag());

-        pressureTracker.addToLatestUploadedFiles("c");
-        assertEquals(0L, pressureTracker.getBytesLag());
+        transferTracker.addToLatestUploadedFiles("c");
+        assertEquals(0L, transferTracker.getBytesLag());
     }

-    public void testIsUploadBytesAverageReady() {
-        int uploadBytesMovingAverageWindowSize = pressureSettings.getUploadBytesMovingAverageWindowSize();
-        pressureTracker = new RemoteSegmentTransferTracker(
-            shardId,
-            directoryFileTransferTracker,
-            uploadBytesMovingAverageWindowSize,
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
-        );
-        assertFalse(pressureTracker.isUploadBytesAverageReady());
+    public void testIsUploadBytesMovingAverageReady() {
+        int movingAverageWindowSize = remoteStoreStatsTrackerFactory.getMovingAverageWindowSize();
+        transferTracker = new RemoteSegmentTransferTracker(shardId, directoryFileTransferTracker, movingAverageWindowSize);
+        assertFalse(transferTracker.isUploadBytesMovingAverageReady());

         long sum = 0;
-        for (int i = 1; i < uploadBytesMovingAverageWindowSize; i++) {
-            pressureTracker.addUploadBytes(i);
+        for (int i = 1; i < movingAverageWindowSize; i++) {
+            transferTracker.updateUploadBytesMovingAverage(i);
             sum += i;
-            assertFalse(pressureTracker.isUploadBytesAverageReady());
-            assertEquals((double) sum / i, pressureTracker.getUploadBytesAverage(), 0.0d);
+            assertFalse(transferTracker.isUploadBytesMovingAverageReady());
+            assertEquals((double) sum / i, transferTracker.getUploadBytesMovingAverage(), 0.0d);
         }

-        pressureTracker.addUploadBytes(uploadBytesMovingAverageWindowSize);
-        sum += uploadBytesMovingAverageWindowSize;
-        assertTrue(pressureTracker.isUploadBytesAverageReady());
-        assertEquals((double) sum / uploadBytesMovingAverageWindowSize, pressureTracker.getUploadBytesAverage(), 0.0d);
+        transferTracker.updateUploadBytesMovingAverage(movingAverageWindowSize);
+        sum += movingAverageWindowSize;
+        assertTrue(transferTracker.isUploadBytesMovingAverageReady());
+        assertEquals((double) sum / movingAverageWindowSize, transferTracker.getUploadBytesMovingAverage(), 0.0d);

-        pressureTracker.addUploadBytes(100);
+        transferTracker.updateUploadBytesMovingAverage(100);
         sum = sum + 100 - 1;
-        assertEquals((double) sum / uploadBytesMovingAverageWindowSize, pressureTracker.getUploadBytesAverage(), 0.0d);
+        assertEquals((double) sum / movingAverageWindowSize, transferTracker.getUploadBytesMovingAverage(), 0.0d);
     }

     public void testIsUploadBytesPerSecAverageReady() {
-        int uploadBytesPerSecMovingAverageWindowSize = pressureSettings.getUploadBytesPerSecMovingAverageWindowSize();
-        pressureTracker = new RemoteSegmentTransferTracker(
-            shardId,
-            directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            uploadBytesPerSecMovingAverageWindowSize,
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
-        );
-        assertFalse(pressureTracker.isUploadBytesPerSecAverageReady());
+        int movingAverageWindowSize = remoteStoreStatsTrackerFactory.getMovingAverageWindowSize();
+        transferTracker = new RemoteSegmentTransferTracker(shardId, directoryFileTransferTracker, movingAverageWindowSize);
+        assertFalse(transferTracker.isUploadBytesPerSecMovingAverageReady());

         long sum = 0;
-        for (int i = 1; i < uploadBytesPerSecMovingAverageWindowSize; i++) {
-            pressureTracker.addUploadBytesPerSec(i);
+        for (int i = 1; i < movingAverageWindowSize; i++) {
+            transferTracker.updateUploadBytesPerSecMovingAverage(i);
             sum += i;
-            assertFalse(pressureTracker.isUploadBytesPerSecAverageReady());
-            assertEquals((double) sum / i, pressureTracker.getUploadBytesPerSecAverage(), 0.0d);
+            assertFalse(transferTracker.isUploadBytesPerSecMovingAverageReady());
+            assertEquals((double) sum / i, transferTracker.getUploadBytesPerSecMovingAverage(), 0.0d);
         }

-        pressureTracker.addUploadBytesPerSec(uploadBytesPerSecMovingAverageWindowSize);
-        sum += uploadBytesPerSecMovingAverageWindowSize;
-        assertTrue(pressureTracker.isUploadBytesPerSecAverageReady());
-        assertEquals((double) sum / uploadBytesPerSecMovingAverageWindowSize, pressureTracker.getUploadBytesPerSecAverage(), 0.0d);
+        transferTracker.updateUploadBytesPerSecMovingAverage(movingAverageWindowSize);
+        sum += movingAverageWindowSize;
+        assertTrue(transferTracker.isUploadBytesPerSecMovingAverageReady());
+        assertEquals((double) sum / movingAverageWindowSize, transferTracker.getUploadBytesPerSecMovingAverage(), 0.0d);

-        pressureTracker.addUploadBytesPerSec(100);
+        transferTracker.updateUploadBytesPerSecMovingAverage(100);
         sum = sum + 100 - 1;
-        assertEquals((double) sum / uploadBytesPerSecMovingAverageWindowSize, pressureTracker.getUploadBytesPerSecAverage(), 0.0d);
+        assertEquals((double) sum / movingAverageWindowSize, transferTracker.getUploadBytesPerSecMovingAverage(), 0.0d);
     }

     public void testIsUploadTimeMsAverageReady() {
-        int uploadTimeMovingAverageWindowSize = pressureSettings.getUploadTimeMovingAverageWindowSize();
-        pressureTracker = new RemoteSegmentTransferTracker(
-            shardId,
-            directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            uploadTimeMovingAverageWindowSize
-        );
-        assertFalse(pressureTracker.isUploadTimeMsAverageReady());
+        int movingAverageWindowSize = remoteStoreStatsTrackerFactory.getMovingAverageWindowSize();
+        transferTracker = new RemoteSegmentTransferTracker(shardId, directoryFileTransferTracker, movingAverageWindowSize);
+        assertFalse(transferTracker.isUploadTimeMovingAverageReady());

         long sum = 0;
-        for (int i = 1; i < uploadTimeMovingAverageWindowSize; i++) {
-            pressureTracker.addUploadTimeMs(i);
+        for (int i = 1; i < movingAverageWindowSize; i++) {
+            transferTracker.updateUploadTimeMovingAverage(i);
             sum += i;
-            assertFalse(pressureTracker.isUploadTimeMsAverageReady());
-            assertEquals((double) sum / i, pressureTracker.getUploadTimeMsAverage(), 0.0d);
+            assertFalse(transferTracker.isUploadTimeMovingAverageReady());
+            assertEquals((double) sum / i, transferTracker.getUploadTimeMovingAverage(), 0.0d);
         }

-        pressureTracker.addUploadTimeMs(uploadTimeMovingAverageWindowSize);
-        sum += uploadTimeMovingAverageWindowSize;
-        assertTrue(pressureTracker.isUploadTimeMsAverageReady());
-        assertEquals((double) sum / uploadTimeMovingAverageWindowSize, pressureTracker.getUploadTimeMsAverage(), 0.0d);
+        transferTracker.updateUploadTimeMovingAverage(movingAverageWindowSize);
+        sum += movingAverageWindowSize;
+        assertTrue(transferTracker.isUploadTimeMovingAverageReady());
+        assertEquals((double) sum / movingAverageWindowSize, transferTracker.getUploadTimeMovingAverage(), 0.0d);

-        pressureTracker.addUploadTimeMs(100);
+        transferTracker.updateUploadTimeMovingAverage(100);
         sum = sum + 100 - 1;
-        assertEquals((double) sum / uploadTimeMovingAverageWindowSize, pressureTracker.getUploadTimeMsAverage(), 0.0d);
+        assertEquals((double) sum / movingAverageWindowSize, transferTracker.getUploadTimeMovingAverage(), 0.0d);
     }

     public void testIsDownloadBytesAverageReady() {
-        pressureTracker = new RemoteSegmentTransferTracker(
+        transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );
-        assertFalse(pressureTracker.getDirectoryFileTransferTracker().isTransferredBytesAverageReady());
+        assertFalse(transferTracker.getDirectoryFileTransferTracker().isTransferredBytesAverageReady());

         long sum = 0;
         for (int i = 1; i < 20; i++) {
-            pressureTracker.getDirectoryFileTransferTracker().updateLastSuccessfulTransferSize(i);
+            transferTracker.getDirectoryFileTransferTracker().updateSuccessfulTransferSize(i);
             sum += i;
-            assertFalse(pressureTracker.getDirectoryFileTransferTracker().isTransferredBytesAverageReady());
-            assertEquals((double) sum / i, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesAverage(), 0.0d);
+            assertFalse(transferTracker.getDirectoryFileTransferTracker().isTransferredBytesAverageReady());
+            assertEquals((double) sum / i, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesAverage(), 0.0d);
         }

-        pressureTracker.getDirectoryFileTransferTracker().updateLastSuccessfulTransferSize(20);
+        transferTracker.getDirectoryFileTransferTracker().updateSuccessfulTransferSize(20);
         sum += 20;
-        assertTrue(pressureTracker.getDirectoryFileTransferTracker().isTransferredBytesAverageReady());
-        assertEquals((double) sum / 20, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesAverage(), 0.0d);
+        assertTrue(transferTracker.getDirectoryFileTransferTracker().isTransferredBytesAverageReady());
+        assertEquals((double) sum / 20, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesAverage(), 0.0d);

-        pressureTracker.getDirectoryFileTransferTracker().updateLastSuccessfulTransferSize(100);
+        transferTracker.getDirectoryFileTransferTracker().updateSuccessfulTransferSize(100);
         sum = sum + 100 - 1;
-        assertEquals((double) sum / 20, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesAverage(), 0.0d);
+        assertEquals((double) sum / 20, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesAverage(), 0.0d);
     }

     public void testIsDownloadBytesPerSecAverageReady() {
-        pressureTracker = new RemoteSegmentTransferTracker(
+        transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             directoryFileTransferTracker,
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );
-        assertFalse(pressureTracker.getDirectoryFileTransferTracker().isTransferredBytesPerSecAverageReady());
+        assertFalse(transferTracker.getDirectoryFileTransferTracker().isTransferredBytesPerSecAverageReady());

         long sum = 0;
         for (int i = 1; i < 20; i++) {
-            pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesPerSec(i);
+            transferTracker.getDirectoryFileTransferTracker().addTransferredBytesPerSec(i);
             sum += i;
-            assertFalse(pressureTracker.getDirectoryFileTransferTracker().isTransferredBytesPerSecAverageReady());
-            assertEquals((double) sum / i, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesPerSecAverage(), 0.0d);
+            assertFalse(transferTracker.getDirectoryFileTransferTracker().isTransferredBytesPerSecAverageReady());
+            assertEquals((double) sum / i, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesPerSecAverage(), 0.0d);
         }

-        pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesPerSec(20);
+        transferTracker.getDirectoryFileTransferTracker().addTransferredBytesPerSec(20);
         sum += 20;
-        assertTrue(pressureTracker.getDirectoryFileTransferTracker().isTransferredBytesPerSecAverageReady());
-        assertEquals((double) sum / 20, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesPerSecAverage(), 0.0d);
+        assertTrue(transferTracker.getDirectoryFileTransferTracker().isTransferredBytesPerSecAverageReady());
+        assertEquals((double) sum / 20, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesPerSecAverage(), 0.0d);

-        pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesPerSec(100);
+        transferTracker.getDirectoryFileTransferTracker().addTransferredBytesPerSec(100);
         sum = sum + 100 - 1;
-        assertEquals((double) sum / 20, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesPerSecAverage(), 0.0d);
+        assertEquals((double) sum / 20, transferTracker.getDirectoryFileTransferTracker().getTransferredBytesPerSecAverage(), 0.0d);
+    }
+
+    public void testAddTotalUploadTimeInMs() {
+        transferTracker = new RemoteSegmentTransferTracker(
+            shardId,
+            directoryFileTransferTracker,
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
+        );
+        long timeToAdd = randomLongBetween(100, 200);
+        transferTracker.addUploadTimeInMillis(timeToAdd);
+        assertEquals(timeToAdd, transferTracker.getTotalUploadTimeInMillis());
+        long moreTimeToAdd = randomLongBetween(100, 200);
+        transferTracker.addUploadTimeInMillis(moreTimeToAdd);
+        assertEquals(timeToAdd + moreTimeToAdd, transferTracker.getTotalUploadTimeInMillis());
+    }
+
+    public void testAddTotalTransferTimeMs() {
+        transferTracker = new RemoteSegmentTransferTracker(
+            shardId,
+            directoryFileTransferTracker,
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
+        );
+        long timeToAdd = randomLongBetween(100, 200);
+        transferTracker.getDirectoryFileTransferTracker().addTotalTransferTimeInMs(timeToAdd);
+        assertEquals(timeToAdd, transferTracker.getDirectoryFileTransferTracker().getTotalTransferTimeInMs());
+        long moreTimeToAdd = randomLongBetween(100, 200);
+        transferTracker.getDirectoryFileTransferTracker().addTotalTransferTimeInMs(moreTimeToAdd);
+        assertEquals(timeToAdd + moreTimeToAdd, transferTracker.getDirectoryFileTransferTracker().getTotalTransferTimeInMs());
     }

     /**
-     * Tests whether RemoteSegmentTransferTracker.Stats object generated correctly from RemoteSegmentTransferTracker.
-     * */
+     * Tests whether the RemoteSegmentTransferTracker.Stats object is generated correctly from RemoteSegmentTransferTracker.
+     */
     public void testStatsObjectCreation() {
-        pressureTracker = constructTracker();
-        RemoteSegmentTransferTracker.Stats pressureTrackerStats = pressureTracker.stats();
-        assertEquals(pressureTracker.getShardId(), pressureTrackerStats.shardId);
-        assertEquals(pressureTracker.getTimeMsLag(), (int) pressureTrackerStats.refreshTimeLagMs);
-        assertEquals(pressureTracker.getLocalRefreshSeqNo(), (int) pressureTrackerStats.localRefreshNumber);
-        assertEquals(pressureTracker.getRemoteRefreshSeqNo(), (int) pressureTrackerStats.remoteRefreshNumber);
-        assertEquals(pressureTracker.getBytesLag(), (int) pressureTrackerStats.bytesLag);
-        assertEquals(pressureTracker.getRejectionCount(), (int) pressureTrackerStats.rejectionCount);
-        assertEquals(pressureTracker.getConsecutiveFailureCount(), (int) pressureTrackerStats.consecutiveFailuresCount);
-        assertEquals(pressureTracker.getUploadBytesStarted(), (int) pressureTrackerStats.uploadBytesStarted);
-        assertEquals(pressureTracker.getUploadBytesSucceeded(), (int) pressureTrackerStats.uploadBytesSucceeded);
-        assertEquals(pressureTracker.getUploadBytesFailed(), (int) pressureTrackerStats.uploadBytesFailed);
-        assertEquals(pressureTracker.getUploadBytesAverage(), pressureTrackerStats.uploadBytesMovingAverage, 0);
-        assertEquals(pressureTracker.getUploadBytesPerSecAverage(), pressureTrackerStats.uploadBytesPerSecMovingAverage, 0);
-        assertEquals(pressureTracker.getUploadTimeMsAverage(), pressureTrackerStats.uploadTimeMovingAverage, 0);
-        assertEquals(pressureTracker.getTotalUploadsStarted(), (int) pressureTrackerStats.totalUploadsStarted);
-        assertEquals(pressureTracker.getTotalUploadsSucceeded(), (int) pressureTrackerStats.totalUploadsSucceeded);
-        assertEquals(pressureTracker.getTotalUploadsFailed(), (int) pressureTrackerStats.totalUploadsFailed);
+        transferTracker = constructTracker();
+        RemoteSegmentTransferTracker.Stats transferTrackerStats = transferTracker.stats();
+        assertEquals(transferTracker.getShardId(), transferTrackerStats.shardId);
+        assertTrue(Math.abs(transferTracker.getTimeMsLag() - transferTrackerStats.refreshTimeLagMs) <= 20);
+        assertEquals(transferTracker.getLocalRefreshSeqNo(), (int) transferTrackerStats.localRefreshNumber);
+        assertEquals(transferTracker.getRemoteRefreshSeqNo(), (int) transferTrackerStats.remoteRefreshNumber);
+        assertEquals(transferTracker.getBytesLag(), (int) transferTrackerStats.bytesLag);
+        assertEquals(transferTracker.getRejectionCount(), (int) transferTrackerStats.rejectionCount);
+        assertEquals(transferTracker.getConsecutiveFailureCount(), (int) transferTrackerStats.consecutiveFailuresCount);
+        assertEquals(transferTracker.getUploadBytesStarted(), (int) transferTrackerStats.uploadBytesStarted);
+        assertEquals(transferTracker.getUploadBytesSucceeded(), (int) transferTrackerStats.uploadBytesSucceeded);
+        assertEquals(transferTracker.getUploadBytesFailed(), (int) transferTrackerStats.uploadBytesFailed);
+        assertEquals(transferTracker.getUploadBytesMovingAverage(), transferTrackerStats.uploadBytesMovingAverage, 0);
+        assertEquals(transferTracker.getUploadBytesPerSecMovingAverage(), transferTrackerStats.uploadBytesPerSecMovingAverage, 0);
+        assertEquals(transferTracker.getUploadTimeMovingAverage(), transferTrackerStats.uploadTimeMovingAverage, 0);
+        assertEquals(transferTracker.getTotalUploadsStarted(), (int) transferTrackerStats.totalUploadsStarted);
+        assertEquals(transferTracker.getTotalUploadsSucceeded(), (int) transferTrackerStats.totalUploadsSucceeded);
+        assertEquals(transferTracker.getTotalUploadsFailed(), (int) transferTrackerStats.totalUploadsFailed);
     }

     /**
@@ -574,64 +552,63 @@ public void testStatsObjectCreation() {
      * This comes into play during internode data transfer.
      */
     public void testStatsObjectCreationViaStream() throws IOException {
-        pressureTracker = constructTracker();
-        RemoteSegmentTransferTracker.Stats pressureTrackerStats = pressureTracker.stats();
+        transferTracker = constructTracker();
+        RemoteSegmentTransferTracker.Stats transferTrackerStats = transferTracker.stats();
         try (BytesStreamOutput out = new BytesStreamOutput()) {
-            pressureTrackerStats.writeTo(out);
+            transferTrackerStats.writeTo(out);
             try (StreamInput in = out.bytes().streamInput()) {
                 RemoteSegmentTransferTracker.Stats deserializedStats = new RemoteSegmentTransferTracker.Stats(in);
-                assertEquals(deserializedStats.shardId, pressureTrackerStats.shardId);
-                assertEquals((int) deserializedStats.refreshTimeLagMs, (int) pressureTrackerStats.refreshTimeLagMs);
-                assertEquals((int) deserializedStats.localRefreshNumber, (int) pressureTrackerStats.localRefreshNumber);
-                assertEquals((int) deserializedStats.remoteRefreshNumber, (int) pressureTrackerStats.remoteRefreshNumber);
-                assertEquals((int) deserializedStats.bytesLag, (int) pressureTrackerStats.bytesLag);
-                assertEquals((int) deserializedStats.rejectionCount, (int) pressureTrackerStats.rejectionCount);
-                assertEquals((int) deserializedStats.consecutiveFailuresCount, (int) pressureTrackerStats.consecutiveFailuresCount);
-                assertEquals((int) deserializedStats.uploadBytesStarted, (int) pressureTrackerStats.uploadBytesStarted);
-                assertEquals((int) deserializedStats.uploadBytesSucceeded, (int) pressureTrackerStats.uploadBytesSucceeded);
-                assertEquals((int) deserializedStats.uploadBytesFailed, (int) pressureTrackerStats.uploadBytesFailed);
-                assertEquals((int) deserializedStats.uploadBytesMovingAverage, pressureTrackerStats.uploadBytesMovingAverage, 0);
+                assertEquals(deserializedStats.shardId, transferTrackerStats.shardId);
+                assertEquals((int) deserializedStats.refreshTimeLagMs, (int) transferTrackerStats.refreshTimeLagMs);
+                assertEquals((int) deserializedStats.localRefreshNumber, (int) transferTrackerStats.localRefreshNumber);
+                assertEquals((int) deserializedStats.remoteRefreshNumber, (int) transferTrackerStats.remoteRefreshNumber);
+                assertEquals((int) deserializedStats.bytesLag, (int) transferTrackerStats.bytesLag);
+                assertEquals((int) deserializedStats.rejectionCount, (int) transferTrackerStats.rejectionCount);
+                assertEquals((int) deserializedStats.consecutiveFailuresCount, (int) transferTrackerStats.consecutiveFailuresCount);
+                assertEquals((int) deserializedStats.uploadBytesStarted, (int) transferTrackerStats.uploadBytesStarted);
+                assertEquals((int) deserializedStats.uploadBytesSucceeded, (int) transferTrackerStats.uploadBytesSucceeded);
+                assertEquals((int) deserializedStats.uploadBytesFailed, (int) transferTrackerStats.uploadBytesFailed);
+                assertEquals((int) deserializedStats.uploadBytesMovingAverage, transferTrackerStats.uploadBytesMovingAverage, 0);
                 assertEquals(
                     (int) deserializedStats.uploadBytesPerSecMovingAverage,
-                    pressureTrackerStats.uploadBytesPerSecMovingAverage,
+                    transferTrackerStats.uploadBytesPerSecMovingAverage,
                     0
                 );
-                assertEquals((int) deserializedStats.uploadTimeMovingAverage, pressureTrackerStats.uploadTimeMovingAverage, 0);
-                assertEquals((int) deserializedStats.totalUploadsStarted, (int) pressureTrackerStats.totalUploadsStarted);
-                assertEquals((int) deserializedStats.totalUploadsSucceeded, (int) pressureTrackerStats.totalUploadsSucceeded);
-                assertEquals((int) deserializedStats.totalUploadsFailed, (int) pressureTrackerStats.totalUploadsFailed);
+                assertEquals((int) deserializedStats.uploadTimeMovingAverage, transferTrackerStats.uploadTimeMovingAverage, 0);
+                assertEquals((int) deserializedStats.totalUploadsStarted, (int) transferTrackerStats.totalUploadsStarted);
+                assertEquals((int) deserializedStats.totalUploadsSucceeded, (int) transferTrackerStats.totalUploadsSucceeded);
+                assertEquals((int) deserializedStats.totalUploadsFailed, (int) transferTrackerStats.totalUploadsFailed);
                 assertEquals(
                     (int) deserializedStats.directoryFileTransferTrackerStats.transferredBytesStarted,
-                    (int) pressureTrackerStats.directoryFileTransferTrackerStats.transferredBytesStarted
+                    (int) transferTrackerStats.directoryFileTransferTrackerStats.transferredBytesStarted
                 );
                 assertEquals(
                     (int) deserializedStats.directoryFileTransferTrackerStats.transferredBytesSucceeded,
-                    (int) pressureTrackerStats.directoryFileTransferTrackerStats.transferredBytesSucceeded
+                    (int) transferTrackerStats.directoryFileTransferTrackerStats.transferredBytesSucceeded
                 );
                 assertEquals(
                     (int) deserializedStats.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage,
-                    (int) pressureTrackerStats.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage
+                    (int) transferTrackerStats.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage
                 );
             }
         }
     }

     private RemoteSegmentTransferTracker constructTracker() {
-        RemoteSegmentTransferTracker segmentPressureTracker = new RemoteSegmentTransferTracker(
+        RemoteSegmentTransferTracker transferTracker = new RemoteSegmentTransferTracker(
             shardId,
             new DirectoryFileTransferTracker(),
-            pressureSettings.getUploadBytesMovingAverageWindowSize(),
-            pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(),
-            pressureSettings.getUploadTimeMovingAverageWindowSize()
+            remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()
         );
-        segmentPressureTracker.incrementTotalUploadsFailed();
-        segmentPressureTracker.addUploadTimeMs(System.nanoTime() / 1_000_000L + randomIntBetween(10, 100));
-        segmentPressureTracker.addUploadBytes(99);
-        segmentPressureTracker.updateRemoteRefreshTimeMs(System.nanoTime() / 1_000_000L + randomIntBetween(10, 100));
-        segmentPressureTracker.incrementRejectionCount();
-        segmentPressureTracker.getDirectoryFileTransferTracker().addTransferredBytesStarted(10);
-        segmentPressureTracker.getDirectoryFileTransferTracker().addTransferredBytesSucceeded(10, System.currentTimeMillis());
-        segmentPressureTracker.getDirectoryFileTransferTracker().addTransferredBytesPerSec(5);
-        return segmentPressureTracker;
+        transferTracker.incrementTotalUploadsStarted();
+        transferTracker.incrementTotalUploadsFailed();
+        transferTracker.updateUploadTimeMovingAverage(currentTimeMsUsingSystemNanos() + randomIntBetween(10, 100));
+        transferTracker.updateUploadBytesMovingAverage(99);
+        transferTracker.updateRemoteRefreshTimeMs(currentTimeMsUsingSystemNanos() + randomIntBetween(10, 100));
+        transferTracker.incrementRejectionCount();
+        transferTracker.getDirectoryFileTransferTracker().addTransferredBytesStarted(10);
+        transferTracker.getDirectoryFileTransferTracker().addTransferredBytesSucceeded(10, System.currentTimeMillis());
+        transferTracker.getDirectoryFileTransferTracker().addTransferredBytesPerSec(5);
+        return transferTracker;
     }
 }
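The rewritten tests above replace inline System.nanoTime() / 1_000_000 arithmetic with a shared currentTimeMsUsingSystemNanos() helper imported from RemoteSegmentTransferTracker. A minimal sketch of what such a helper boils down to, assuming it only converts the monotonic nano clock to milliseconds (the class name here is illustrative, not part of the patch):

    import java.util.concurrent.TimeUnit;

    final class MonotonicTime {
        // Milliseconds derived from System.nanoTime(): unlike System.currentTimeMillis(),
        // this value never jumps on wall-clock adjustments, so a lag computed as the
        // difference between a local and a remote refresh reading stays meaningful.
        static long currentTimeMsUsingSystemNanos() {
            return TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
        }
    }

Because such a value is only meaningful as a difference between two readings, the sleep-based tests above compare the reported lag against the elapsed sleep time with a +/- 20 ms tolerance rather than against absolute timestamps.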
diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java
index d79e5ae99b696..cb77174e612fd 100644
--- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java
+++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureServiceTests.java
@@ -8,17 +8,12 @@
 package org.opensearch.index.remote;

-import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException;
 import org.opensearch.core.index.shard.ShardId;
-import org.opensearch.index.IndexSettings;
 import org.opensearch.index.shard.IndexShard;
-import org.opensearch.index.store.Store;
-import org.opensearch.indices.replication.common.ReplicationType;
-import org.opensearch.test.IndexSettingsModule;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
@@ -26,10 +21,12 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 import java.util.stream.IntStream;

-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
+import static org.opensearch.index.remote.RemoteSegmentTransferTracker.currentTimeMsUsingSystemNanos;
+import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexShard;

 public class RemoteStorePressureServiceTests extends OpenSearchTestCase {
@@ -41,6 +38,8 @@ public class RemoteStorePressureServiceTests extends OpenSearchTestCase {

     private RemoteStorePressureService pressureService;

+    private RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory;
+
     @Override
     public void setUp() throws Exception {
         super.setUp();
@@ -60,8 +59,9 @@ public void tearDown() throws Exception {
     }

     public void testIsSegmentsUploadBackpressureEnabled() {
-        pressureService = new RemoteStorePressureService(clusterService, Settings.EMPTY);
-        assertFalse(pressureService.isSegmentsUploadBackpressureEnabled());
+        remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, Settings.EMPTY);
+        pressureService = new RemoteStorePressureService(clusterService, Settings.EMPTY, remoteStoreStatsTrackerFactory);
+        assertTrue(pressureService.isSegmentsUploadBackpressureEnabled());

         Settings newSettings = Settings.builder()
             .put(RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED.getKey(), "true")
@@ -71,61 +71,51 @@ public void testIsSegmentsUploadBackpressureEnabled() {
         assertTrue(pressureService.isSegmentsUploadBackpressureEnabled());
     }

-    public void testAfterIndexShardCreatedForRemoteBackedIndex() {
-        IndexShard indexShard = createIndexShard(shardId, true);
-        pressureService = new RemoteStorePressureService(clusterService, Settings.EMPTY);
-        pressureService.afterIndexShardCreated(indexShard);
-        assertNotNull(pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()));
-    }
-
-    public void testAfterIndexShardCreatedForNonRemoteBackedIndex() {
-        IndexShard indexShard = createIndexShard(shardId, false);
-        pressureService = new RemoteStorePressureService(clusterService, Settings.EMPTY);
-        pressureService.afterIndexShardCreated(indexShard);
-        assertNull(pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()));
-    }
-
-    public void testAfterIndexShardClosed() {
-        IndexShard indexShard = createIndexShard(shardId, true);
-        pressureService = new RemoteStorePressureService(clusterService, Settings.EMPTY);
-        pressureService.afterIndexShardCreated(indexShard);
-        assertNotNull(pressureService.getRemoteRefreshSegmentTracker(shardId));
-
-        pressureService.afterIndexShardClosed(shardId, indexShard, indexShard.indexSettings().getSettings());
-        assertNull(pressureService.getRemoteRefreshSegmentTracker(shardId));
-    }
-
-    public void testValidateSegmentUploadLag() {
+    public void testValidateSegmentUploadLag() throws InterruptedException {
         // Create the pressure tracker
         IndexShard indexShard = createIndexShard(shardId, true);
-        pressureService = new RemoteStorePressureService(clusterService, Settings.EMPTY);
-        pressureService.afterIndexShardCreated(indexShard);
+        remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, Settings.EMPTY);
+        pressureService = new RemoteStorePressureService(clusterService, Settings.EMPTY, remoteStoreStatsTrackerFactory);
+        remoteStoreStatsTrackerFactory.afterIndexShardCreated(indexShard);

-        RemoteSegmentTransferTracker pressureTracker = pressureService.getRemoteRefreshSegmentTracker(shardId);
+        RemoteSegmentTransferTracker pressureTracker = remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId);
         pressureTracker.updateLocalRefreshSeqNo(6);

         // 1. time lag more than dynamic threshold
         pressureTracker.updateRemoteRefreshSeqNo(3);
         AtomicLong sum = new AtomicLong();
         IntStream.range(0, 20).forEach(i -> {
-            pressureTracker.addUploadTimeMs(i);
+            pressureTracker.updateUploadTimeMovingAverage(i);
             sum.addAndGet(i);
         });
         double avg = (double) sum.get() / 20;
-        long currentMs = System.nanoTime() / 1_000_000;
-        pressureTracker.updateLocalRefreshTimeMs((long) (currentMs + 12 * avg));
-        pressureTracker.updateRemoteRefreshTimeMs(currentMs);
-        Exception e = assertThrows(OpenSearchRejectedExecutionException.class, () -> pressureService.validateSegmentsUploadLag(shardId));
-        assertTrue(e.getMessage().contains("due to remote segments lagging behind local segments"));
-        assertTrue(e.getMessage().contains("time_lag:114 ms dynamic_time_lag_threshold:95.0 ms"));

-        pressureTracker.updateRemoteRefreshTimeMs((long) (currentMs + 2 * avg));
+        // We run this to ensure that the local and remote refresh times are no longer the same
+        while (pressureTracker.getLocalRefreshTimeMs() == currentTimeMsUsingSystemNanos()) {
+            Thread.sleep(10);
+        }
+        long localRefreshTimeMs = currentTimeMsUsingSystemNanos();
+        pressureTracker.updateLocalRefreshTimeMs(localRefreshTimeMs);
+
+        while (currentTimeMsUsingSystemNanos() - localRefreshTimeMs <= 20 * avg) {
+            Thread.sleep((long) (4 * avg));
+        }
+        Exception e = assertThrows(OpenSearchRejectedExecutionException.class, () -> pressureService.validateSegmentsUploadLag(shardId));
+        String regex = "^rejected execution on primary shard:\\[index]\\[0] due to remote segments lagging behind "
+            + "local segments.time_lag:[0-9]{2,3} ms dynamic_time_lag_threshold:95\\.0 ms$";
+        Pattern pattern = Pattern.compile(regex);
+        Matcher matcher = pattern.matcher(e.getMessage());
+        assertTrue(matcher.matches());
+
+        pressureTracker.updateRemoteRefreshTimeMs(pressureTracker.getLocalRefreshTimeMs());
+        pressureTracker.updateLocalRefreshTimeMs(currentTimeMsUsingSystemNanos());
+        Thread.sleep((long) (2 * avg));
         pressureService.validateSegmentsUploadLag(shardId);

         // 2. bytes lag more than dynamic threshold
         sum.set(0);
         IntStream.range(0, 20).forEach(i -> {
-            pressureTracker.addUploadBytes(i);
+            pressureTracker.updateUploadBytesMovingAverage(i);
             sum.addAndGet(i);
         });
         avg = (double) sum.get() / 20;
@@ -142,27 +132,17 @@ public void testValidateSegmentUploadLag() {
         pressureService.validateSegmentsUploadLag(shardId);

         // 3. Consecutive failures more than the limit
+        IntStream.range(0, 5).forEach(ignore -> pressureTracker.incrementTotalUploadsStarted());
         IntStream.range(0, 5).forEach(ignore -> pressureTracker.incrementTotalUploadsFailed());
         pressureService.validateSegmentsUploadLag(shardId);
+        pressureTracker.incrementTotalUploadsStarted();
         pressureTracker.incrementTotalUploadsFailed();
         e = assertThrows(OpenSearchRejectedExecutionException.class, () -> pressureService.validateSegmentsUploadLag(shardId));
         assertTrue(e.getMessage().contains("due to remote segments lagging behind local segments"));
         assertTrue(e.getMessage().contains("failure_streak_count:6 min_consecutive_failure_threshold:5"));
+        pressureTracker.incrementTotalUploadsStarted();
         pressureTracker.incrementTotalUploadsSucceeded();
         pressureService.validateSegmentsUploadLag(shardId);
     }
-
-    private static IndexShard createIndexShard(ShardId shardId, boolean remoteStoreEnabled) {
-        Settings settings = Settings.builder()
-            .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
-            .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, String.valueOf(remoteStoreEnabled))
-            .build();
-        IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test_index", settings);
-        Store store = mock(Store.class);
-        IndexShard indexShard = mock(IndexShard.class);
-        when(indexShard.indexSettings()).thenReturn(indexSettings);
-        when(indexShard.shardId()).thenReturn(shardId);
-        when(indexShard.store()).thenReturn(store);
-        return indexShard;
-    }
 }
diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java
index 9c5ec69cf6be9..064c6c10eba02 100644
--- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java
+++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePressureSettingsTests.java
@@ -15,10 +15,6 @@
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;

-import java.util.concurrent.atomic.AtomicInteger;
-
-import static org.mockito.ArgumentMatchers.anyInt;
-import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;

 public class RemoteStorePressureSettingsTests extends OpenSearchTestCase {
@@ -52,7 +48,7 @@ public void testGetDefaultSettings() {
         );

-        // Check remote refresh segment pressure enabled is false
-        assertFalse(pressureSettings.isRemoteRefreshSegmentPressureEnabled());
+        // Check remote refresh segment pressure enabled is true by default
+        assertTrue(pressureSettings.isRemoteRefreshSegmentPressureEnabled());

         // Check bytes lag variance threshold default value
         assertEquals(10.0, pressureSettings.getBytesLagVarianceFactor(), 0.0d);
@@ -62,15 +58,6 @@

         // Check minimum consecutive failures limit default value
         assertEquals(5, pressureSettings.getMinConsecutiveFailuresLimit());
-
-        // Check upload bytes moving average window size default value
-        assertEquals(20, pressureSettings.getUploadBytesMovingAverageWindowSize());
-
-        // Check upload bytes per sec moving average window size default value
-        assertEquals(20, pressureSettings.getUploadBytesPerSecMovingAverageWindowSize());
-
-        // Check upload time moving average window size default value
-        assertEquals(20, pressureSettings.getUploadTimeMovingAverageWindowSize());
     }

     public void testGetConfiguredSettings() {
@@ -79,9 +66,6 @@ public void testGetConfiguredSettings() {
             .put(RemoteStorePressureSettings.BYTES_LAG_VARIANCE_FACTOR.getKey(), 50.0)
             .put(RemoteStorePressureSettings.UPLOAD_TIME_LAG_VARIANCE_FACTOR.getKey(), 60.0)
             .put(RemoteStorePressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT.getKey(), 121)
-            .put(RemoteStorePressureSettings.UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 102)
-            .put(RemoteStorePressureSettings.UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 103)
-            .put(RemoteStorePressureSettings.UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 104)
             .build();
         RemoteStorePressureSettings pressureSettings = new RemoteStorePressureSettings(
             clusterService,
@@ -100,15 +84,6 @@ public void testGetConfiguredSettings() {

         // Check minimum consecutive failures limit configured value
         assertEquals(121, pressureSettings.getMinConsecutiveFailuresLimit());
-
-        // Check upload bytes moving average window size configured value
-        assertEquals(102, pressureSettings.getUploadBytesMovingAverageWindowSize());
-
-        // Check upload bytes per sec moving average window size configured value
-        assertEquals(103, pressureSettings.getUploadBytesPerSecMovingAverageWindowSize());
-
-        // Check upload time moving average window size configured value
-        assertEquals(104, pressureSettings.getUploadTimeMovingAverageWindowSize());
     }

     public void testUpdateAfterGetDefaultSettings() {
@@ -123,9 +98,6 @@ public void testUpdateAfterGetDefaultSettings() {
             .put(RemoteStorePressureSettings.BYTES_LAG_VARIANCE_FACTOR.getKey(), 50.0)
             .put(RemoteStorePressureSettings.UPLOAD_TIME_LAG_VARIANCE_FACTOR.getKey(), 60.0)
             .put(RemoteStorePressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT.getKey(), 121)
-            .put(RemoteStorePressureSettings.UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 102)
-            .put(RemoteStorePressureSettings.UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 103)
-            .put(RemoteStorePressureSettings.UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 104)
             .build();
         clusterService.getClusterSettings().applySettings(newSettings);
@@ -140,15 +112,6 @@ public void testUpdateAfterGetDefaultSettings() {

         // Check minimum consecutive failures limit updated
         assertEquals(121, pressureSettings.getMinConsecutiveFailuresLimit());
-
-        // Check upload bytes moving average window size updated
-        assertEquals(102, pressureSettings.getUploadBytesMovingAverageWindowSize());
-
-        // Check upload bytes per sec moving average window size updated
-        assertEquals(103, pressureSettings.getUploadBytesPerSecMovingAverageWindowSize());
-
-        // Check upload time moving average window size updated
-        assertEquals(104, pressureSettings.getUploadTimeMovingAverageWindowSize());
     }

     public void testUpdateAfterGetConfiguredSettings() {
@@ -157,9 +120,6 @@ public void testUpdateAfterGetConfiguredSettings() {
             .put(RemoteStorePressureSettings.BYTES_LAG_VARIANCE_FACTOR.getKey(), 50.0)
             .put(RemoteStorePressureSettings.UPLOAD_TIME_LAG_VARIANCE_FACTOR.getKey(), 60.0)
             .put(RemoteStorePressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT.getKey(), 121)
-            .put(RemoteStorePressureSettings.UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 102)
-            .put(RemoteStorePressureSettings.UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 103)
-            .put(RemoteStorePressureSettings.UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 104)
             .build();
         RemoteStorePressureSettings pressureSettings = new RemoteStorePressureSettings(
             clusterService,
@@ -171,9 +131,6 @@ public void testUpdateAfterGetConfiguredSettings() {
             .put(RemoteStorePressureSettings.BYTES_LAG_VARIANCE_FACTOR.getKey(), 40.0)
             .put(RemoteStorePressureSettings.UPLOAD_TIME_LAG_VARIANCE_FACTOR.getKey(), 50.0)
             .put(RemoteStorePressureSettings.MIN_CONSECUTIVE_FAILURES_LIMIT.getKey(), 111)
-            .put(RemoteStorePressureSettings.UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 112)
-            .put(RemoteStorePressureSettings.UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 113)
-            .put(RemoteStorePressureSettings.UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE.getKey(), 114)
             .build();
         clusterService.getClusterSettings().applySettings(newSettings);
@@ -189,59 +146,5 @@ public void testUpdateAfterGetConfiguredSettings() {

         // Check minimum consecutive failures limit updated
         assertEquals(111, pressureSettings.getMinConsecutiveFailuresLimit());
-
-        // Check upload bytes moving average window size updated
-        assertEquals(112, pressureSettings.getUploadBytesMovingAverageWindowSize());
-
-        // Check upload bytes per sec moving average window size updated
-        assertEquals(113, pressureSettings.getUploadBytesPerSecMovingAverageWindowSize());
-
-        // Check upload time moving average window size updated
-        assertEquals(114, pressureSettings.getUploadTimeMovingAverageWindowSize());
-    }
-
-    public void testUpdateTriggeredInRemotePressureServiceOnUpdateSettings() {
-
-        int toUpdateVal1 = 1121, toUpdateVal2 = 1123, toUpdateVal3 = 1125;
-
-        AtomicInteger updatedUploadBytesWindowSize = new AtomicInteger();
-        AtomicInteger updatedUploadBytesPerSecWindowSize = new AtomicInteger();
-        AtomicInteger updatedUploadTimeWindowSize = new AtomicInteger();
-
-        RemoteStorePressureService pressureService = mock(RemoteStorePressureService.class);
-
-        // Upload bytes
-        doAnswer(invocation -> {
-            updatedUploadBytesWindowSize.set(invocation.getArgument(0));
-            return null;
-        }).when(pressureService).updateUploadBytesMovingAverageWindowSize(anyInt());
-
-        // Upload bytes per sec
-        doAnswer(invocation -> {
-            updatedUploadBytesPerSecWindowSize.set(invocation.getArgument(0));
-            return null;
-        }).when(pressureService).updateUploadBytesPerSecMovingAverageWindowSize(anyInt());
-
-        // Upload time
-        doAnswer(invocation -> {
-            updatedUploadTimeWindowSize.set(invocation.getArgument(0));
-            return null;
-        }).when(pressureService).updateUploadTimeMsMovingAverageWindowSize(anyInt());
-
-        RemoteStorePressureSettings pressureSettings = new RemoteStorePressureSettings(clusterService, Settings.EMPTY, pressureService);
-        Settings newSettings = Settings.builder()
-            .put(RemoteStorePressureSettings.UPLOAD_BYTES_MOVING_AVERAGE_WINDOW_SIZE.getKey(), toUpdateVal1)
-            .put(RemoteStorePressureSettings.UPLOAD_BYTES_PER_SEC_MOVING_AVERAGE_WINDOW_SIZE.getKey(), toUpdateVal2)
-            .put(RemoteStorePressureSettings.UPLOAD_TIME_MOVING_AVERAGE_WINDOW_SIZE.getKey(), toUpdateVal3)
-            .build();
-        clusterService.getClusterSettings().applySettings(newSettings);
-
-        // Assertions
-        assertEquals(toUpdateVal1, pressureSettings.getUploadBytesMovingAverageWindowSize());
-        assertEquals(toUpdateVal1, updatedUploadBytesWindowSize.get());
-        assertEquals(toUpdateVal2, pressureSettings.getUploadBytesPerSecMovingAverageWindowSize());
-        assertEquals(toUpdateVal2, updatedUploadBytesPerSecWindowSize.get());
-        assertEquals(toUpdateVal3, pressureSettings.getUploadTimeMovingAverageWindowSize());
-        assertEquals(toUpdateVal3, updatedUploadTimeWindowSize.get());
     }
 }
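The three per-metric window-size settings removed above are consolidated into the single MOVING_AVERAGE_WINDOW_SIZE setting exercised by the new factory tests below. A minimal sketch of the windowed average that the readiness assertions in these tests rely on, assuming a plain ring buffer (the class and member names here are illustrative, not the production implementation):

    final class WindowedAverage {
        private final long[] window;
        private long sum;   // sum of the values currently inside the window
        private int count;  // total observations recorded so far

        WindowedAverage(int windowSize) {
            this.window = new long[windowSize];
        }

        // Once the window is full, each new value evicts the oldest, so the running
        // sum changes by (newValue - oldestValue); that is why the tests expect
        // sum = sum + 100 - 1 after recording 100 into a window that started at 1.
        void record(long value) {
            int slot = count % window.length;
            sum += value - window[slot];
            window[slot] = value;
            count++;
        }

        // "Ready" only after a full window of observations: the tests assert false
        // for the first windowSize - 1 records and true from the windowSize-th on.
        boolean isReady() {
            return count >= window.length;
        }

        double average() {
            return (double) sum / Math.min(count, window.length);
        }
    }

Validating the configured window size at construction time (the testInvalidMovingAverageWindowSize case below) keeps a too-small window from producing a jumpy average that would destabilize the dynamic backpressure thresholds.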
b/server/src/test/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactoryTests.java new file mode 100644 index 0000000000000..c300f316ac633 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactoryTests.java @@ -0,0 +1,119 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexShard; + +public class RemoteStoreStatsTrackerFactoryTests extends OpenSearchTestCase { + private ThreadPool threadPool; + private ClusterService clusterService; + private Settings settings; + private ShardId shardId; + private IndexShard indexShard; + private RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; + + @Override + public void setUp() throws Exception { + super.setUp(); + shardId = new ShardId("index", "uuid", 0); + indexShard = createIndexShard(shardId, true); + threadPool = new TestThreadPool(getTestName()); + settings = Settings.builder() + .put( + RemoteStoreStatsTrackerFactory.MOVING_AVERAGE_WINDOW_SIZE.getKey(), + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE + ) + .build(); + clusterService = new ClusterService(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool); + remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, settings); + } + + @Override + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testAfterIndexShardCreatedForRemoteBackedIndex() { + remoteStoreStatsTrackerFactory.afterIndexShardCreated(indexShard); + assertNotNull(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId())); + } + + public void testAfterIndexShardCreatedForNonRemoteBackedIndex() { + indexShard = createIndexShard(shardId, false); + remoteStoreStatsTrackerFactory.afterIndexShardCreated(indexShard); + assertNull(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId())); + } + + public void testAfterIndexShardClosed() { + remoteStoreStatsTrackerFactory.afterIndexShardCreated(indexShard); + assertNotNull(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId)); + remoteStoreStatsTrackerFactory.afterIndexShardClosed(shardId, indexShard, indexShard.indexSettings().getSettings()); + assertNull(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId)); + } + + public void testGetConfiguredSettings() { + assertEquals( + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + } + + public void testInvalidMovingAverageWindowSize() { + Settings settings = Settings.builder() + .put( + RemoteStoreStatsTrackerFactory.MOVING_AVERAGE_WINDOW_SIZE.getKey(), + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE - 1 + ) + .build(); + assertThrows( + 
"Failed to parse value", + IllegalArgumentException.class, + () -> new RemoteStoreStatsTrackerFactory( + new ClusterService(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool), + settings + ) + ); + } + + public void testUpdateAfterGetConfiguredSettings() { + assertEquals( + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + + Settings newSettings = Settings.builder().put(RemoteStoreStatsTrackerFactory.MOVING_AVERAGE_WINDOW_SIZE.getKey(), 102).build(); + + clusterService.getClusterSettings().applySettings(newSettings); + + // Check moving average window size updated + assertEquals(102, remoteStoreStatsTrackerFactory.getMovingAverageWindowSize()); + } + + public void testGetDefaultSettings() { + remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory( + new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool), + Settings.EMPTY + ); + // Check moving average window size updated + assertEquals( + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE, + remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() + ); + } +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java new file mode 100644 index 0000000000000..e072d3037caad --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.remote; + +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.store.Store; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.IndexSettingsModule; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Helper functions for Remote Store tests + */ +public class RemoteStoreTestsHelper { + static IndexShard createIndexShard(ShardId shardId, boolean remoteStoreEnabled) { + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, String.valueOf(remoteStoreEnabled)) + .build(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test_index", settings); + Store store = mock(Store.class); + IndexShard indexShard = mock(IndexShard.class); + when(indexShard.indexSettings()).thenReturn(indexSettings); + when(indexShard.shardId()).thenReturn(shardId); + when(indexShard.store()).thenReturn(store); + return indexShard; + } +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index 9afa75dd601b2..d3c7d754d6b61 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -8,10 +8,85 @@ package org.opensearch.index.remote; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.translog.transfer.TranslogTransferMetadata; import org.opensearch.test.OpenSearchTestCase; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.stream.Collectors; + +import static org.opensearch.index.remote.RemoteStoreUtils.verifyNoMultipleWriters; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; +import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; + public class RemoteStoreUtilsTests extends OpenSearchTestCase { + private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 1, + 1, + "node-1" + ); + + private final String metadataFilenameDup = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 2, + 1, + "node-2" + ); + private final String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 13, + 34, + 1, + 1, + "node-1" + ); + + private final String oldMetadataFilename = getOldSegmentMetadataFilename(12, 23, 34, 1, 1); + + /* + Gives segment metadata filename for <2.11 version + */ + public static String getOldSegmentMetadataFilename( + long primaryTerm, + long generation, + long translogGeneration, + long uploadCounter, + int metadataVersion + ) { + return String.join( + SEPARATOR, + METADATA_PREFIX, + RemoteStoreUtils.invertLong(primaryTerm), + RemoteStoreUtils.invertLong(generation), 
+ RemoteStoreUtils.invertLong(translogGeneration), + RemoteStoreUtils.invertLong(uploadCounter), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(metadataVersion) + ); + } + + public static String getOldTranslogMetadataFilename(long primaryTerm, long generation, int metadataVersion) { + return String.join( + METADATA_SEPARATOR, + METADATA_PREFIX, + RemoteStoreUtils.invertLong(primaryTerm), + RemoteStoreUtils.invertLong(generation), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(metadataVersion) + ); + } + public void testInvertToStrInvalid() { assertThrows(IllegalArgumentException.class, () -> RemoteStoreUtils.invertLong(-1)); } @@ -60,4 +135,48 @@ public void testGetSegmentNameUnderscoreDelimiterOverrides() { public void testGetSegmentNameException() { assertThrows(IllegalArgumentException.class, () -> RemoteStoreUtils.getSegmentName("dvd")); } + + public void testVerifyMultipleWriters_Segment() { + List mdFiles = new ArrayList<>(); + mdFiles.add(metadataFilename); + mdFiles.add(metadataFilename2); + mdFiles.add(oldMetadataFilename); + verifyNoMultipleWriters(mdFiles, RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen); + + mdFiles.add(metadataFilenameDup); + assertThrows( + IllegalStateException.class, + () -> verifyNoMultipleWriters(mdFiles, RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen) + ); + } + + public void testVerifyMultipleWriters_Translog() throws InterruptedException { + TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2, "node--1"); + String mdFilename = tm.getFileName(); + Thread.sleep(1); + TranslogTransferMetadata tm2 = new TranslogTransferMetadata(1, 1, 1, 2, "node--1"); + String mdFilename2 = tm2.getFileName(); + List bmList = new LinkedList<>(); + bmList.add(new PlainBlobMetadata(mdFilename, 1)); + bmList.add(new PlainBlobMetadata(mdFilename2, 1)); + bmList.add(new PlainBlobMetadata(getOldTranslogMetadataFilename(1, 1, 1), 1)); + RemoteStoreUtils.verifyNoMultipleWriters( + bmList.stream().map(BlobMetadata::name).collect(Collectors.toList()), + TranslogTransferMetadata::getNodeIdByPrimaryTermAndGen + ); + + bmList = new LinkedList<>(); + bmList.add(new PlainBlobMetadata(mdFilename, 1)); + TranslogTransferMetadata tm3 = new TranslogTransferMetadata(1, 1, 1, 2, "node--2"); + bmList.add(new PlainBlobMetadata(tm3.getFileName(), 1)); + List finalBmList = bmList; + assertThrows( + IllegalStateException.class, + () -> RemoteStoreUtils.verifyNoMultipleWriters( + finalBmList.stream().map(BlobMetadata::name).collect(Collectors.toList()), + TranslogTransferMetadata::getNodeIdByPrimaryTermAndGen + ) + ); + } + } diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteTranslogTransferTrackerTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteTranslogTransferTrackerTests.java new file mode 100644 index 0000000000000..6b6d388f725f7 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteTranslogTransferTrackerTests.java @@ -0,0 +1,383 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.remote; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.io.IOException; + +public class RemoteTranslogTransferTrackerTests extends OpenSearchTestCase { + private ShardId shardId; + private RemoteTranslogTransferTracker tracker; + + @Override + public void setUp() throws Exception { + super.setUp(); + shardId = new ShardId("index", "uuid", 0); + } + + @Before + public void initTracker() { + tracker = new RemoteTranslogTransferTracker(shardId, RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE); + } + + public void testGetShardId() { + assertEquals(shardId, tracker.getShardId()); + } + + public void testAddUploadsStarted() { + populateUploadsStarted(); + } + + public void testAddUploadsFailed() { + populateUploadsStarted(); + assertEquals(0L, tracker.getTotalUploadsFailed()); + tracker.incrementTotalUploadsFailed(); + assertEquals(1L, tracker.getTotalUploadsFailed()); + tracker.incrementTotalUploadsFailed(); + assertEquals(2L, tracker.getTotalUploadsFailed()); + } + + public void testInvalidAddUploadsFailed() { + populateUploadsStarted(); + for (int i = 0; i < tracker.getTotalUploadsStarted(); i++) { + tracker.incrementTotalUploadsSucceeded(); + } + + AssertionError error = assertThrows(AssertionError.class, () -> tracker.incrementTotalUploadsFailed()); + assertTrue(error.getMessage().contains("Sum of failure count (")); + } + + public void testAddUploadsSucceeded() { + populateUploadsStarted(); + assertEquals(0L, tracker.getTotalUploadsSucceeded()); + tracker.incrementTotalUploadsSucceeded(); + assertEquals(1L, tracker.getTotalUploadsSucceeded()); + tracker.incrementTotalUploadsSucceeded(); + assertEquals(2L, tracker.getTotalUploadsSucceeded()); + } + + public void testInvalidAddUploadsSucceeded() { + populateUploadsStarted(); + for (int i = 0; i < tracker.getTotalUploadsStarted(); i++) { + tracker.incrementTotalUploadsFailed(); + } + + AssertionError error = assertThrows(AssertionError.class, () -> tracker.incrementTotalUploadsSucceeded()); + assertTrue(error.getMessage().contains("Sum of failure count (")); + } + + public void testAddUploadBytesStarted() { + populateUploadBytesStarted(); + } + + public void testAddUploadBytesFailed() { + populateUploadBytesStarted(); + assertEquals(0L, tracker.getUploadBytesFailed()); + long count1 = randomIntBetween(1, (int) tracker.getUploadBytesStarted() / 4); + tracker.addUploadBytesFailed(count1); + assertEquals(count1, tracker.getUploadBytesFailed()); + long count2 = randomIntBetween(1, (int) tracker.getUploadBytesStarted() / 4); + tracker.addUploadBytesFailed(count2); + assertEquals(count1 + count2, tracker.getUploadBytesFailed()); + } + + public void testInvalidAddUploadBytesFailed() { + populateUploadBytesStarted(); + tracker.addUploadBytesSucceeded(tracker.getUploadBytesStarted()); + AssertionError error = assertThrows(AssertionError.class, () -> tracker.addUploadBytesFailed(1L)); + assertTrue(error.getMessage().contains("Sum of failure count (")); + } + + public void testAddUploadBytesSucceeded() { + populateUploadBytesStarted(); + assertEquals(0L, tracker.getUploadBytesSucceeded()); + long count1 = randomIntBetween(1, (int) tracker.getUploadBytesStarted() / 4); + tracker.addUploadBytesSucceeded(count1); + assertEquals(count1, tracker.getUploadBytesSucceeded()); + long count2 = 
randomIntBetween(1, (int) tracker.getUploadBytesStarted() / 4); + tracker.addUploadBytesSucceeded(count2); + assertEquals(count1 + count2, tracker.getUploadBytesSucceeded()); + } + + public void testInvalidAddUploadBytesSucceeded() { + populateUploadBytesStarted(); + tracker.addUploadBytesFailed(tracker.getUploadBytesStarted()); + AssertionError error = assertThrows(AssertionError.class, () -> tracker.addUploadBytesSucceeded(1L)); + assertTrue(error.getMessage().contains("Sum of failure count (")); + } + + public void testAddUploadTimeInMillis() { + assertEquals(0L, tracker.getTotalUploadTimeInMillis()); + int duration1 = randomIntBetween(10, 50); + tracker.addUploadTimeInMillis(duration1); + assertEquals(duration1, tracker.getTotalUploadTimeInMillis()); + int duration2 = randomIntBetween(10, 50); + tracker.addUploadTimeInMillis(duration2); + assertEquals(duration1 + duration2, tracker.getTotalUploadTimeInMillis()); + } + + public void testSetLastSuccessfulUploadTimestamp() { + assertEquals(0, tracker.getLastSuccessfulUploadTimestamp()); + long lastUploadTimestamp = System.currentTimeMillis() + randomIntBetween(10, 100); + tracker.setLastSuccessfulUploadTimestamp(lastUploadTimestamp); + assertEquals(lastUploadTimestamp, tracker.getLastSuccessfulUploadTimestamp()); + } + + public void testUpdateUploadBytesMovingAverage() { + int movingAverageWindowSize = randomIntBetween( + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE + 5 + ); + tracker = new RemoteTranslogTransferTracker(shardId, movingAverageWindowSize); + assertFalse(tracker.isUploadBytesMovingAverageReady()); + + long sum = 0; + for (int i = 1; i < movingAverageWindowSize; i++) { + tracker.updateUploadBytesMovingAverage(i); + sum += i; + assertFalse(tracker.isUploadBytesMovingAverageReady()); + assertEquals((double) sum / i, tracker.getUploadBytesMovingAverage(), 0.0d); + } + + tracker.updateUploadBytesMovingAverage(movingAverageWindowSize); + sum += movingAverageWindowSize; + assertTrue(tracker.isUploadBytesMovingAverageReady()); + assertEquals((double) sum / movingAverageWindowSize, tracker.getUploadBytesMovingAverage(), 0.0d); + + tracker.updateUploadBytesMovingAverage(100); + sum = sum + 100 - 1; + assertEquals((double) sum / movingAverageWindowSize, tracker.getUploadBytesMovingAverage(), 0.0d); + } + + public void testUpdateUploadBytesPerSecMovingAverage() { + int movingAverageWindowSize = randomIntBetween( + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE + 5 + ); + tracker = new RemoteTranslogTransferTracker(shardId, movingAverageWindowSize); + assertFalse(tracker.isUploadBytesPerSecMovingAverageReady()); + + long sum = 0; + for (int i = 1; i < movingAverageWindowSize; i++) { + tracker.updateUploadBytesPerSecMovingAverage(i); + sum += i; + assertFalse(tracker.isUploadBytesPerSecMovingAverageReady()); + assertEquals((double) sum / i, tracker.getUploadBytesPerSecMovingAverage(), 0.0d); + } + + tracker.updateUploadBytesPerSecMovingAverage(movingAverageWindowSize); + sum += movingAverageWindowSize; + assertTrue(tracker.isUploadBytesPerSecMovingAverageReady()); + assertEquals((double) sum / movingAverageWindowSize, tracker.getUploadBytesPerSecMovingAverage(), 0.0d); + + tracker.updateUploadBytesPerSecMovingAverage(100); + sum = sum + 100 - 1; + assertEquals((double) sum / movingAverageWindowSize, 
tracker.getUploadBytesPerSecMovingAverage(), 0.0d); + } + + public void testUpdateUploadTimeMovingAverage() { + int movingAverageWindowSize = randomIntBetween( + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE + 5 + ); + tracker = new RemoteTranslogTransferTracker(shardId, movingAverageWindowSize); + assertFalse(tracker.isUploadTimeMovingAverageReady()); + + long sum = 0; + for (int i = 1; i < movingAverageWindowSize; i++) { + tracker.updateUploadTimeMovingAverage(i); + sum += i; + assertFalse(tracker.isUploadTimeMovingAverageReady()); + assertEquals((double) sum / i, tracker.getUploadTimeMovingAverage(), 0.0d); + } + + tracker.updateUploadTimeMovingAverage(movingAverageWindowSize); + sum += movingAverageWindowSize; + assertTrue(tracker.isUploadTimeMovingAverageReady()); + assertEquals((double) sum / movingAverageWindowSize, tracker.getUploadTimeMovingAverage(), 0.0d); + + tracker.updateUploadTimeMovingAverage(100); + sum = sum + 100 - 1; + assertEquals((double) sum / movingAverageWindowSize, tracker.getUploadTimeMovingAverage(), 0.0d); + } + + public void testAddDownloadsSucceeded() { + assertEquals(0L, tracker.getTotalDownloadsSucceeded()); + tracker.incrementDownloadsSucceeded(); + assertEquals(1L, tracker.getTotalDownloadsSucceeded()); + tracker.incrementDownloadsSucceeded(); + assertEquals(2L, tracker.getTotalDownloadsSucceeded()); + } + + public void testAddDownloadBytesSucceeded() { + assertEquals(0L, tracker.getDownloadBytesSucceeded()); + long count1 = randomIntBetween(1, 500); + tracker.addDownloadBytesSucceeded(count1); + assertEquals(count1, tracker.getDownloadBytesSucceeded()); + long count2 = randomIntBetween(1, 500); + tracker.addDownloadBytesSucceeded(count2); + assertEquals(count1 + count2, tracker.getDownloadBytesSucceeded()); + } + + public void testAddDownloadTimeInMillis() { + assertEquals(0L, tracker.getTotalDownloadTimeInMillis()); + int duration1 = randomIntBetween(10, 50); + tracker.addDownloadTimeInMillis(duration1); + assertEquals(duration1, tracker.getTotalDownloadTimeInMillis()); + int duration2 = randomIntBetween(10, 50); + tracker.addDownloadTimeInMillis(duration2); + assertEquals(duration1 + duration2, tracker.getTotalDownloadTimeInMillis()); + } + + public void testSetLastSuccessfulDownloadTimestamp() { + assertEquals(0, tracker.getLastSuccessfulDownloadTimestamp()); + long lastSuccessfulDownloadTimestamp = System.currentTimeMillis() + randomIntBetween(10, 100); + tracker.setLastSuccessfulDownloadTimestamp(lastSuccessfulDownloadTimestamp); + assertEquals(lastSuccessfulDownloadTimestamp, tracker.getLastSuccessfulDownloadTimestamp()); + } + + public void testUpdateDownloadBytesMovingAverage() { + int movingAverageWindowSize = randomIntBetween( + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE + 5 + ); + tracker = new RemoteTranslogTransferTracker(shardId, movingAverageWindowSize); + assertFalse(tracker.isDownloadBytesMovingAverageReady()); + + long sum = 0; + for (int i = 1; i < movingAverageWindowSize; i++) { + tracker.updateDownloadBytesMovingAverage(i); + sum += i; + assertFalse(tracker.isDownloadBytesMovingAverageReady()); + assertEquals((double) sum / i, tracker.getDownloadBytesMovingAverage(), 0.0d); + } + + tracker.updateDownloadBytesMovingAverage(movingAverageWindowSize); + sum += movingAverageWindowSize; +
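// The window is now full (movingAverageWindowSize values recorded), so the average becomes ready;
+ // each later update evicts the oldest value (here 1, the first recorded), hence sum = sum + 100 - 1 below.
+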
assertTrue(tracker.isDownloadBytesMovingAverageReady()); + assertEquals((double) sum / movingAverageWindowSize, tracker.getDownloadBytesMovingAverage(), 0.0d); + + tracker.updateDownloadBytesMovingAverage(100); + sum = sum + 100 - 1; + assertEquals((double) sum / movingAverageWindowSize, tracker.getDownloadBytesMovingAverage(), 0.0d); + } + + public void testUpdateDownloadBytesPerSecMovingAverage() { + int movingAverageWindowSize = randomIntBetween( + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE + 5 + ); + tracker = new RemoteTranslogTransferTracker(shardId, movingAverageWindowSize); + assertFalse(tracker.isDownloadBytesPerSecMovingAverageReady()); + + long sum = 0; + for (int i = 1; i < movingAverageWindowSize; i++) { + tracker.updateDownloadBytesPerSecMovingAverage(i); + sum += i; + assertFalse(tracker.isDownloadBytesPerSecMovingAverageReady()); + assertEquals((double) sum / i, tracker.getDownloadBytesPerSecMovingAverage(), 0.0d); + } + + tracker.updateDownloadBytesPerSecMovingAverage(movingAverageWindowSize); + sum += movingAverageWindowSize; + assertTrue(tracker.isDownloadBytesPerSecMovingAverageReady()); + assertEquals((double) sum / movingAverageWindowSize, tracker.getDownloadBytesPerSecMovingAverage(), 0.0d); + + tracker.updateDownloadBytesPerSecMovingAverage(100); + sum = sum + 100 - 1; + assertEquals((double) sum / movingAverageWindowSize, tracker.getDownloadBytesPerSecMovingAverage(), 0.0d); + } + + public void testUpdateDownloadTimeMovingAverage() { + int movingAverageWindowSize = randomIntBetween( + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE_MIN_VALUE, + RemoteStoreStatsTrackerFactory.Defaults.MOVING_AVERAGE_WINDOW_SIZE + 5 + ); + tracker = new RemoteTranslogTransferTracker(shardId, movingAverageWindowSize); + assertFalse(tracker.isDownloadTimeMovingAverageReady()); + + long sum = 0; + for (int i = 1; i < movingAverageWindowSize; i++) { + tracker.updateDownloadTimeMovingAverage(i); + sum += i; + assertFalse(tracker.isDownloadTimeMovingAverageReady()); + assertEquals((double) sum / i, tracker.getDownloadTimeMovingAverage(), 0.0d); + } + + tracker.updateDownloadTimeMovingAverage(movingAverageWindowSize); + sum += movingAverageWindowSize; + assertTrue(tracker.isDownloadTimeMovingAverageReady()); + assertEquals((double) sum / movingAverageWindowSize, tracker.getDownloadTimeMovingAverage(), 0.0d); + + tracker.updateDownloadTimeMovingAverage(100); + sum = sum + 100 - 1; + assertEquals((double) sum / movingAverageWindowSize, tracker.getDownloadTimeMovingAverage(), 0.0d); + } + + public void testStatsObjectCreation() { + populateDummyStats(); + RemoteTranslogTransferTracker.Stats actualStats = tracker.stats(); + assertTrue(tracker.hasSameStatsAs(actualStats)); + } + + public void testStatsObjectCreationViaStream() throws IOException { + populateDummyStats(); + RemoteTranslogTransferTracker.Stats expectedStats = tracker.stats(); + try (BytesStreamOutput out = new BytesStreamOutput()) { + expectedStats.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + RemoteTranslogTransferTracker.Stats deserializedStats = new RemoteTranslogTransferTracker.Stats(in); + assertTrue(tracker.hasSameStatsAs(deserializedStats)); + } + } + } + + private void populateUploadsStarted() { + assertEquals(0L, tracker.getTotalUploadsStarted()); + tracker.incrementTotalUploadsStarted(); + assertEquals(1L, tracker.getTotalUploadsStarted()); + 
tracker.incrementTotalUploadsStarted(); + assertEquals(2L, tracker.getTotalUploadsStarted()); + } + + private void populateUploadBytesStarted() { + assertEquals(0L, tracker.getUploadBytesStarted()); + long count1 = randomIntBetween(500, 1000); + tracker.addUploadBytesStarted(count1); + assertEquals(count1, tracker.getUploadBytesStarted()); + long count2 = randomIntBetween(500, 1000); + tracker.addUploadBytesStarted(count2); + assertEquals(count1 + count2, tracker.getUploadBytesStarted()); + } + + private void populateDummyStats() { + int startedBytesUpload = randomIntBetween(10, 100); + tracker.addUploadBytesStarted(startedBytesUpload); + tracker.addUploadBytesFailed(randomIntBetween(1, startedBytesUpload / 2)); + tracker.addUploadBytesSucceeded(randomIntBetween(1, startedBytesUpload / 2)); + + tracker.addUploadTimeInMillis(randomIntBetween(10, 100)); + tracker.setLastSuccessfulUploadTimestamp(System.currentTimeMillis() + randomIntBetween(10, 100)); + + tracker.incrementTotalUploadsStarted(); + tracker.incrementTotalUploadsStarted(); + tracker.incrementTotalUploadsFailed(); + tracker.incrementTotalUploadsSucceeded(); + + tracker.addDownloadBytesSucceeded(randomIntBetween(10, 100)); + tracker.addDownloadTimeInMillis(randomIntBetween(10, 100)); + tracker.setLastSuccessfulDownloadTimestamp(System.currentTimeMillis() + randomIntBetween(10, 100)); + tracker.incrementDownloadsSucceeded(); + } +} diff --git a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java index 7d2d8e38d066e..c27e4bf27327a 100644 --- a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java +++ b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java @@ -32,11 +32,20 @@ package org.opensearch.index.search.stats; +import org.opensearch.action.search.SearchPhase; +import org.opensearch.action.search.SearchPhaseContext; +import org.opensearch.action.search.SearchPhaseName; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.index.search.stats.SearchStats.Stats; import org.opensearch.test.OpenSearchTestCase; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class SearchStatsTests extends OpenSearchTestCase { @@ -45,9 +54,9 @@ public void testShardLevelSearchGroupStats() throws Exception { // let's create two dummy search stats with groups Map groupStats1 = new HashMap<>(); Map groupStats2 = new HashMap<>(); - groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)); - SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats1); - SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats2); + groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)); + SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats1); + SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats2); // adding these two search stats and checking group stats are correct searchStats1.add(searchStats2); @@ -63,12 +72,46 @@ public void testShardLevelSearchGroupStats() throws Exception { // adding again 
would then return wrong search stats (would return 4! instead of 3) searchStats1.add(searchStats2); assertStats(groupStats1.get("group1"), 3); + + long paramValue = randomIntBetween(2, 50); + + // Testing for request stats + SearchRequestStats testRequestStats = new SearchRequestStats(); + SearchPhaseContext ctx = mock(SearchPhaseContext.class); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + SearchPhase mockSearchPhase = mock(SearchPhase.class); + when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); + when(mockSearchPhase.getStartTimeInNanos()).thenReturn(System.nanoTime() - TimeUnit.SECONDS.toNanos(paramValue)); + when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); + for (int iterator = 0; iterator < paramValue; iterator++) { + testRequestStats.onPhaseStart(ctx); + testRequestStats.onPhaseEnd(ctx); + } + } + searchStats1.setSearchRequestStats(testRequestStats); + for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { + assertEquals( + 0, + searchStats1.getTotal().getRequestStatsLongHolder().getRequestStatsHolder().get(searchPhaseName.getName()).current + ); + assertEquals( + paramValue, + searchStats1.getTotal().getRequestStatsLongHolder().getRequestStatsHolder().get(searchPhaseName.getName()).total + ); + assertThat( + searchStats1.getTotal().getRequestStatsLongHolder().getRequestStatsHolder().get(searchPhaseName.getName()).timeInMillis, + greaterThanOrEqualTo(paramValue) + ); + } } private static void assertStats(Stats stats, long equalTo) { assertEquals(equalTo, stats.getQueryCount()); assertEquals(equalTo, stats.getQueryTimeInMillis()); assertEquals(equalTo, stats.getQueryCurrent()); + assertEquals(equalTo, stats.getConcurrentQueryCount()); + assertEquals(equalTo, stats.getConcurrentQueryTimeInMillis()); + assertEquals(equalTo, stats.getConcurrentQueryCurrent()); assertEquals(equalTo, stats.getFetchCount()); assertEquals(equalTo, stats.getFetchTimeInMillis()); assertEquals(equalTo, stats.getFetchCurrent()); @@ -81,6 +124,7 @@ private static void assertStats(Stats stats, long equalTo) { assertEquals(equalTo, stats.getSuggestCount()); assertEquals(equalTo, stats.getSuggestTimeInMillis()); assertEquals(equalTo, stats.getSuggestCurrent()); + // avg_concurrency is not summed up across stats + assertEquals(1, stats.getConcurrentAvgSliceCount(), 0); } - } diff --git a/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java index 73073f0bb9e47..8363ea3757a2b 100644 --- a/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java @@ -45,6 +45,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndicesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -78,7 +79,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java 
b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java index efa901de75c38..7971591e82bab 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java @@ -33,6 +33,7 @@ package org.opensearch.index.seqno; import org.apache.lucene.codecs.Codec; +import org.apache.lucene.util.Version; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.AllocationId; @@ -50,8 +51,10 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.SegmentReplicationShardStats; +import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.indices.replication.common.SegmentReplicationLagTimer; import org.opensearch.test.IndexSettingsModule; import java.io.IOException; @@ -1825,34 +1828,43 @@ public void testSegmentReplicationCheckpointTracking() { initializingIds.forEach(aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED)); + final StoreFileMetadata segment_1 = new StoreFileMetadata("segment_1", 1L, "abcd", Version.LATEST); + final StoreFileMetadata segment_2 = new StoreFileMetadata("segment_2", 50L, "abcd", Version.LATEST); + final StoreFileMetadata segment_3 = new StoreFileMetadata("segment_3", 100L, "abcd", Version.LATEST); final ReplicationCheckpoint initialCheckpoint = new ReplicationCheckpoint( tracker.shardId(), 0L, 1, 1, 1L, - Codec.getDefault().getName() + Codec.getDefault().getName(), + Map.of("segment_1", segment_1) ); final ReplicationCheckpoint secondCheckpoint = new ReplicationCheckpoint( tracker.shardId(), 0L, 2, 2, - 50L, - Codec.getDefault().getName() + 51L, + Codec.getDefault().getName(), + Map.of("segment_1", segment_1, "segment_2", segment_2) ); final ReplicationCheckpoint thirdCheckpoint = new ReplicationCheckpoint( tracker.shardId(), 0L, 2, 3, - 100L, - Codec.getDefault().getName() + 151L, + Codec.getDefault().getName(), + Map.of("segment_1", segment_1, "segment_2", segment_2, "segment_3", segment_3) ); tracker.setLatestReplicationCheckpoint(initialCheckpoint); + tracker.startReplicationLagTimers(initialCheckpoint); tracker.setLatestReplicationCheckpoint(secondCheckpoint); + tracker.startReplicationLagTimers(secondCheckpoint); tracker.setLatestReplicationCheckpoint(thirdCheckpoint); + tracker.startReplicationLagTimers(thirdCheckpoint); final Set expectedIds = ids(initializingIds); @@ -1860,7 +1872,8 @@ public void testSegmentReplicationCheckpointTracking() { assertEquals(expectedIds.size(), groupStats.size()); for (SegmentReplicationShardStats shardStat : groupStats) { assertEquals(3, shardStat.getCheckpointsBehindCount()); - assertEquals(100L, shardStat.getBytesBehindCount()); + assertEquals(151L, shardStat.getBytesBehindCount()); + assertTrue(shardStat.getCurrentReplicationLagMillis() >= shardStat.getCurrentReplicationTimeMillis()); } // simulate replicas moved up to date. 
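The revised byte counts in this test follow from the per-file StoreFileMetadata now attached to each ReplicationCheckpoint: a replica that has processed no checkpoints lacks segment_1 (1 byte) + segment_2 (50) + segment_3 (100) = 151 bytes, and once the first checkpoint is applied only segment_2 and segment_3 remain outstanding, i.e. 150 bytes. A minimal sketch of that computation, assuming (hypothetically, this is not the production implementation) that bytes-behind is the total length of files present in the primary's latest checkpoint but missing from the replica's visible checkpoint:

    import java.util.Map;
    import org.opensearch.index.store.StoreFileMetadata;

    class BytesBehindSketch {
        // Hypothetical helper: sum the lengths of segment files the replica has not yet received.
        static long bytesBehind(Map<String, StoreFileMetadata> latest, Map<String, StoreFileMetadata> visible) {
            return latest.values().stream()
                .filter(md -> visible.containsKey(md.name()) == false)
                .mapToLong(StoreFileMetadata::length)
                .sum();
        }
    }

With visible = Map.of() this yields 151 for the third checkpoint above, and 150 once segment_1 has been received, matching the assertions in this hunk and the next.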
@@ -1876,7 +1889,7 @@ public void testSegmentReplicationCheckpointTracking() { assertEquals(expectedIds.size(), groupStats.size()); for (SegmentReplicationShardStats shardStat : groupStats) { assertEquals(2, shardStat.getCheckpointsBehindCount()); - assertEquals(99L, shardStat.getBytesBehindCount()); + assertEquals(150L, shardStat.getBytesBehindCount()); } for (String id : expectedIds) { @@ -1894,6 +1907,86 @@ public void testSegmentReplicationCheckpointTracking() { } } + public void testSegmentReplicationCheckpointForRelocatingPrimary() { + Settings settings = Settings.builder().put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); + final long initialClusterStateVersion = randomNonNegativeLong(); + final int numberOfActiveAllocationsIds = randomIntBetween(2, 2); + final int numberOfInitializingIds = randomIntBetween(2, 2); + final Tuple, Set> activeAndInitializingAllocationIds = randomActiveAndInitializingAllocationIds( + numberOfActiveAllocationsIds, + numberOfInitializingIds + ); + final Set activeAllocationIds = activeAndInitializingAllocationIds.v1(); + final Set initializingIds = activeAndInitializingAllocationIds.v2(); + + AllocationId targetAllocationId = initializingIds.iterator().next(); + AllocationId primaryId = activeAllocationIds.iterator().next(); + String relocatingToNodeId = nodeIdFromAllocationId(targetAllocationId); + + logger.info("--> activeAllocationIds {} Primary {}", activeAllocationIds, primaryId.getId()); + logger.info("--> initializingIds {} Target {}", initializingIds, targetAllocationId); + + final ShardId shardId = new ShardId("test", "_na_", 0); + final IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId); + for (final AllocationId initializingId : initializingIds) { + boolean primaryRelocationTarget = initializingId.equals(targetAllocationId); + builder.addShard( + TestShardRouting.newShardRouting( + shardId, + nodeIdFromAllocationId(initializingId), + null, + primaryRelocationTarget, + ShardRoutingState.INITIALIZING, + initializingId + ) + ); + } + builder.addShard( + TestShardRouting.newShardRouting( + shardId, + nodeIdFromAllocationId(primaryId), + relocatingToNodeId, + true, + ShardRoutingState.STARTED, + primaryId + ) + ); + IndexShardRoutingTable routingTable = builder.build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + tracker.updateFromClusterManager(initialClusterStateVersion, ids(activeAllocationIds), routingTable); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + assertThat(tracker.getReplicationGroup().getInSyncAllocationIds(), equalTo(ids(activeAllocationIds))); + assertThat(tracker.getReplicationGroup().getRoutingTable(), equalTo(routingTable)); + assertTrue(activeAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); + initializingIds.forEach(aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED)); + + final StoreFileMetadata segment_1 = new StoreFileMetadata("segment_1", 5L, "abcd", Version.LATEST); + final ReplicationCheckpoint initialCheckpoint = new ReplicationCheckpoint( + tracker.shardId(), + 0L, + 1, + 1, + 5L, + Codec.getDefault().getName(), + Map.of("segment_1", segment_1) + ); + tracker.setLatestReplicationCheckpoint(initialCheckpoint); + tracker.startReplicationLagTimers(initialCheckpoint); + + final Set expectedIds = initializingIds.stream() + .filter(id -> id.equals(targetAllocationId)) + .map(AllocationId::getId) + .collect(Collectors.toSet()); + + Set groupStats = 
tracker.getSegmentReplicationStats(); + assertEquals(expectedIds.size(), groupStats.size()); + for (SegmentReplicationShardStats shardStat : groupStats) { + assertEquals(1, shardStat.getCheckpointsBehindCount()); + assertEquals(5L, shardStat.getBytesBehindCount()); + assertTrue(shardStat.getCurrentReplicationLagMillis() >= shardStat.getCurrentReplicationTimeMillis()); + } + } + public void testSegmentReplicationCheckpointTrackingInvalidAllocationIDs() { Settings settings = Settings.builder().put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); final long initialClusterStateVersion = randomNonNegativeLong(); @@ -1933,9 +2026,11 @@ public void testSegmentReplicationCheckpointTrackingInvalidAllocationIDs() { 1, 1, 1L, - Codec.getDefault().getName() + Codec.getDefault().getName(), + Collections.emptyMap() ); tracker.setLatestReplicationCheckpoint(initialCheckpoint); + tracker.startReplicationLagTimers(initialCheckpoint); // we expect that the only returned ids from getSegmentReplicationStats will be the initializing ids we marked with // markAsTrackingAndInSyncQuietly. @@ -2161,4 +2256,15 @@ public void testIllegalStateExceptionIfUnknownAllocationIdWithRemoteTranslogEnab expectThrows(IllegalStateException.class, () -> tracker.markAllocationIdAsInSync(randomAlphaOfLength(10), randomNonNegativeLong())); } + public void testSegRepTimer() throws Throwable { + SegmentReplicationLagTimer timer = new SegmentReplicationLagTimer(); + Thread.sleep(100); + timer.start(); + Thread.sleep(100); + timer.stop(); + assertTrue("Total time since timer started should be greater than 100", timer.time() >= 100); + assertTrue("Total time since timer was created should be greater than 200", timer.totalElapsedTime() >= 200); + assertTrue("Total elapsed time should be greater than time since timer start", timer.totalElapsedTime() - timer.time() >= 100); + } + } diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java index 72a0fdaf5c77a..ed04d9a20f18e 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java @@ -49,6 +49,7 @@ import org.opensearch.index.IndexService; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -85,7 +86,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java index 9895eddb911f5..63a9ac2f2e8ec 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -49,6 +49,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.indices.SystemIndices; +import 
org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -84,7 +85,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -123,7 +125,8 @@ public void testRetentionLeaseSyncActionOnPrimary() { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); final RetentionLeases retentionLeases = mock(RetentionLeases.class); final RetentionLeaseSyncAction.Request request = new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases); @@ -160,7 +163,8 @@ public void testRetentionLeaseSyncActionOnReplica() throws Exception { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); final RetentionLeases retentionLeases = mock(RetentionLeases.class); final RetentionLeaseSyncAction.Request request = new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases); @@ -201,7 +205,8 @@ public void testBlocks() { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); assertNull(action.indexBlockLevel()); @@ -231,7 +236,8 @@ private RetentionLeaseSyncAction createAction() { shardStateAction, new ActionFilters(Collections.emptySet()), new IndexingPressureService(Settings.EMPTY, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 9a7fefb78a06b..92681bd5e299d 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -125,6 +125,7 @@ import org.opensearch.index.mapper.VersionFieldMapper; import org.opensearch.index.remote.RemoteSegmentStats; import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLease; import org.opensearch.index.seqno.RetentionLeaseSyncer; @@ -137,6 +138,7 @@ import org.opensearch.index.store.StoreUtils; import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; +import org.opensearch.index.translog.RemoteTranslogStats; import org.opensearch.index.translog.TestTranslog; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogStats; @@ -1716,7 +1718,7 @@ public Set getPendingDeletions() throws IOException { } }; - try (Store store = createStore(shardId, new IndexSettings(metadata, Settings.EMPTY), directory)) { + try (Store store = createStore(shardId, new IndexSettings(metadata, Settings.EMPTY), directory, shardPath)) { IndexShard shard = 
newShard( shardRouting, shardPath, @@ -1759,9 +1761,12 @@ public void testShardStatsWithRemoteStoreEnabled() throws IOException { .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) .build() ); - RemoteSegmentTransferTracker remoteRefreshSegmentTracker = shard.getRemoteStorePressureService() - .getRemoteRefreshSegmentTracker(shard.shardId); - populateSampleRemoteStoreStats(remoteRefreshSegmentTracker); + RemoteSegmentTransferTracker remoteSegmentTransferTracker = shard.getRemoteStoreStatsTrackerFactory() + .getRemoteSegmentTransferTracker(shard.shardId); + RemoteTranslogTransferTracker remoteTranslogTransferTracker = shard.getRemoteStoreStatsTrackerFactory() + .getRemoteTranslogTransferTracker(shard.shardId); + populateSampleRemoteSegmentStats(remoteSegmentTransferTracker); + populateSampleRemoteTranslogStats(remoteTranslogTransferTracker); ShardStats shardStats = new ShardStats( shard.routingEntry(), shard.shardPath(), @@ -1771,9 +1776,9 @@ public void testShardStatsWithRemoteStoreEnabled() throws IOException { shard.getRetentionLeaseStats() ); RemoteSegmentStats remoteSegmentStats = shardStats.getStats().getSegments().getRemoteSegmentStats(); - assertEquals(remoteRefreshSegmentTracker.getUploadBytesStarted(), remoteSegmentStats.getUploadBytesStarted()); - assertEquals(remoteRefreshSegmentTracker.getUploadBytesSucceeded(), remoteSegmentStats.getUploadBytesSucceeded()); - assertEquals(remoteRefreshSegmentTracker.getUploadBytesFailed(), remoteSegmentStats.getUploadBytesFailed()); + assertRemoteSegmentStats(remoteSegmentTransferTracker, remoteSegmentStats); + RemoteTranslogStats remoteTranslogStats = shardStats.getStats().getTranslog().getRemoteTranslogStats(); + assertRemoteTranslogStats(remoteTranslogTransferTracker, remoteTranslogStats); closeShards(shard); } @@ -2678,6 +2683,7 @@ public void testRelocatedForRemoteTranslogBackedIndexWithAsyncDurability() throw AllocationId.newRelocation(routing.allocationId()) ); IndexShardTestCase.updateRoutingEntry(indexShard, routing); + indexDoc(indexShard, "_doc", "0"); assertTrue(indexShard.isSyncNeeded()); try { indexShard.relocated(routing.getTargetRelocatingShard().allocationId().getId(), primaryContext -> {}, () -> {}); @@ -2782,6 +2788,7 @@ public void testSyncSegmentsFromGivenRemoteSegmentStore() throws IOException { indexDoc(source, "_doc", "1"); indexDoc(source, "_doc", "2"); source.refresh("test"); + assertTrue("At least one remote sync should have been completed", source.isRemoteSegmentStoreInSync()); assertDocs(source, "1", "2"); indexDoc(source, "_doc", "3"); source.refresh("test"); @@ -4844,9 +4851,46 @@ public void testRecordsForceMerges() throws IOException { closeShards(shard); } - private void populateSampleRemoteStoreStats(RemoteSegmentTransferTracker tracker) { - tracker.addUploadBytesStarted(10L); + private void populateSampleRemoteSegmentStats(RemoteSegmentTransferTracker tracker) { + tracker.addUploadBytesStarted(30L); tracker.addUploadBytesSucceeded(10L); tracker.addUploadBytesFailed(10L); + tracker.incrementRejectionCount(); + tracker.incrementRejectionCount(); + } + + private void populateSampleRemoteTranslogStats(RemoteTranslogTransferTracker tracker) { + tracker.incrementTotalUploadsStarted(); + tracker.incrementTotalUploadsStarted(); + tracker.incrementTotalUploadsStarted(); + tracker.incrementTotalUploadsSucceeded(); + tracker.incrementTotalUploadsFailed(); + int bytesStarted = randomIntBetween(100, 1000); + tracker.addUploadBytesStarted(bytesStarted); + tracker.addUploadBytesSucceeded(randomIntBetween(1, bytesStarted
/ 2)); + tracker.addUploadBytesFailed(randomIntBetween(1, bytesStarted / 2)); + } + + private static void assertRemoteTranslogStats( + RemoteTranslogTransferTracker remoteTranslogTransferTracker, + RemoteTranslogStats remoteTranslogStats + ) { + assertEquals(remoteTranslogTransferTracker.getTotalUploadsStarted(), remoteTranslogStats.getTotalUploadsStarted()); + assertEquals(remoteTranslogTransferTracker.getTotalUploadsSucceeded(), remoteTranslogStats.getTotalUploadsSucceeded()); + assertEquals(remoteTranslogTransferTracker.getTotalUploadsFailed(), remoteTranslogStats.getTotalUploadsFailed()); + assertEquals(remoteTranslogTransferTracker.getUploadBytesStarted(), remoteTranslogStats.getUploadBytesStarted()); + assertEquals(remoteTranslogTransferTracker.getUploadBytesSucceeded(), remoteTranslogStats.getUploadBytesSucceeded()); + assertEquals(remoteTranslogTransferTracker.getUploadBytesFailed(), remoteTranslogStats.getUploadBytesFailed()); + } + + private static void assertRemoteSegmentStats( + RemoteSegmentTransferTracker remoteSegmentTransferTracker, + RemoteSegmentStats remoteSegmentStats + ) { + assertEquals(remoteSegmentTransferTracker.getUploadBytesStarted(), remoteSegmentStats.getUploadBytesStarted()); + assertEquals(remoteSegmentTransferTracker.getUploadBytesSucceeded(), remoteSegmentStats.getUploadBytesSucceeded()); + assertEquals(remoteSegmentTransferTracker.getUploadBytesFailed(), remoteSegmentStats.getUploadBytesFailed()); + assertTrue(remoteSegmentStats.getTotalRejections() > 0); + assertEquals(remoteSegmentTransferTracker.getRejectionCount(), remoteSegmentStats.getTotalRejections()); } } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexingStatsTests.java b/server/src/test/java/org/opensearch/index/shard/IndexingStatsTests.java new file mode 100644 index 0000000000000..acf482552c260 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/IndexingStatsTests.java @@ -0,0 +1,141 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.shard; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; +import java.util.concurrent.atomic.AtomicLong; + +public class IndexingStatsTests extends OpenSearchTestCase { + + public void testSerialization() throws IOException { + IndexingStats stats = createTestInstance(); + + try (BytesStreamOutput out = new BytesStreamOutput()) { + stats.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + IndexingStats deserializedStats = new IndexingStats(in); + + if (stats.getTotal() == null) { + assertNull(deserializedStats.getTotal()); + return; + } + + IndexingStats.Stats totalStats = stats.getTotal(); + IndexingStats.Stats deserializedTotalStats = deserializedStats.getTotal(); + + assertEquals(totalStats.getIndexCount(), deserializedTotalStats.getIndexCount()); + assertEquals(totalStats.getIndexTime(), deserializedTotalStats.getIndexTime()); + assertEquals(totalStats.getIndexCurrent(), deserializedTotalStats.getIndexCurrent()); + assertEquals(totalStats.getIndexFailedCount(), deserializedTotalStats.getIndexFailedCount()); + assertEquals(totalStats.getDeleteCount(), deserializedTotalStats.getDeleteCount()); + assertEquals(totalStats.getDeleteTime(), deserializedTotalStats.getDeleteTime()); + assertEquals(totalStats.getDeleteCurrent(), deserializedTotalStats.getDeleteCurrent()); + assertEquals(totalStats.getNoopUpdateCount(), deserializedTotalStats.getNoopUpdateCount()); + assertEquals(totalStats.isThrottled(), deserializedTotalStats.isThrottled()); + assertEquals(totalStats.getThrottleTime(), deserializedTotalStats.getThrottleTime()); + + if (totalStats.getDocStatusStats() == null) { + assertNull(deserializedTotalStats.getDocStatusStats()); + return; + } + + IndexingStats.Stats.DocStatusStats docStatusStats = totalStats.getDocStatusStats(); + IndexingStats.Stats.DocStatusStats deserializedDocStatusStats = deserializedTotalStats.getDocStatusStats(); + + assertTrue( + Arrays.equals( + docStatusStats.getDocStatusCounter(), + deserializedDocStatusStats.getDocStatusCounter(), + Comparator.comparingLong(AtomicLong::longValue) + ) + ); + } + } + } + + public void testToXContentForIndexingStats() throws IOException { + IndexingStats stats = createTestInstance(); + IndexingStats.Stats totalStats = stats.getTotal(); + AtomicLong[] counter = totalStats.getDocStatusStats().getDocStatusCounter(); + + String expected = "{\"indexing\":{\"index_total\":" + + totalStats.getIndexCount() + + ",\"index_time_in_millis\":" + + totalStats.getIndexTime().getMillis() + + ",\"index_current\":" + + totalStats.getIndexCurrent() + + ",\"index_failed\":" + + totalStats.getIndexFailedCount() + + ",\"delete_total\":" + + totalStats.getDeleteCount() + + ",\"delete_time_in_millis\":" + + totalStats.getDeleteTime().getMillis() + + ",\"delete_current\":" + + totalStats.getDeleteCurrent() + + ",\"noop_update_total\":" + + totalStats.getNoopUpdateCount() + + ",\"is_throttled\":" + + totalStats.isThrottled() + + ",\"throttle_time_in_millis\":" + + totalStats.getThrottleTime().getMillis() + + ",\"doc_status\":{\"1xx\":" + + counter[0] + + ",\"2xx\":" + + counter[1] + + ",\"3xx\":" + + 
counter[2] + + ",\"4xx\":" + + counter[3] + + ",\"5xx\":" + + counter[4] + + "}}}"; + + XContentBuilder xContentBuilder = MediaTypeRegistry.contentBuilder(MediaTypeRegistry.JSON); + xContentBuilder.startObject(); + xContentBuilder = stats.toXContent(xContentBuilder, ToXContent.EMPTY_PARAMS); + xContentBuilder.endObject(); + + assertEquals(expected, xContentBuilder.toString()); + } + + private IndexingStats createTestInstance() { + IndexingStats.Stats.DocStatusStats docStatusStats = new IndexingStats.Stats.DocStatusStats(); + for (int i = 1; i < 6; ++i) { + docStatusStats.add(RestStatus.fromCode(i * 100), randomNonNegativeLong()); + } + + IndexingStats.Stats stats = new IndexingStats.Stats( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomBoolean(), + randomNonNegativeLong(), + docStatusStats + ); + + return new IndexingStats(stats); + } + +} diff --git a/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java index aa0386765a754..87aa1f20c5991 100644 --- a/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java @@ -133,7 +133,8 @@ public void setupListeners() throws Exception { shardId, createTempDir("translog"), indexSettings, - BigArrays.NON_RECYCLING_INSTANCE + BigArrays.NON_RECYCLING_INSTANCE, + "" ); Engine.EventListener eventListener = new Engine.EventListener() { @Override diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardCorruptionTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardCorruptionTests.java new file mode 100644 index 0000000000000..21bf580712761 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardCorruptionTests.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.shard; + +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.tests.util.LuceneTestCase; +import org.opensearch.core.util.FileSystemUtils; +import org.opensearch.test.CorruptionUtils; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.stream.Stream; + +@LuceneTestCase.SuppressFileSystems("WindowsFS") +public class RemoteIndexShardCorruptionTests extends IndexShardTestCase { + + public void testLocalDirectoryContains() throws IOException { + IndexShard indexShard = newStartedShard(true); + int numDocs = between(1, 10); + for (int i = 0; i < numDocs; i++) { + indexDoc(indexShard, "_doc", Integer.toString(i)); + } + flushShard(indexShard); + indexShard.store().incRef(); + Directory localDirectory = indexShard.store().directory(); + Path shardPath = indexShard.shardPath().getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + Path tempDir = createTempDir(); + for (String file : localDirectory.listAll()) { + if (file.equals("write.lock") || file.startsWith("extra")) { + continue; + } + boolean corrupted = randomBoolean(); + long checksum = 0; + try (IndexInput indexInput = localDirectory.openInput(file, IOContext.DEFAULT)) { + checksum = CodecUtil.retrieveChecksum(indexInput); + } + if (corrupted) { + Files.copy(shardPath.resolve(file), tempDir.resolve(file)); + try (FileChannel raf = FileChannel.open(shardPath.resolve(file), StandardOpenOption.READ, StandardOpenOption.WRITE)) { + CorruptionUtils.corruptAt(shardPath.resolve(file), raf, (int) (raf.size() - 8)); + } + } + if (corrupted == false) { + assertTrue(indexShard.localDirectoryContains(localDirectory, file, checksum)); + } else { + assertFalse(indexShard.localDirectoryContains(localDirectory, file, checksum)); + assertFalse(Files.exists(shardPath.resolve(file))); + } + } + try (Stream files = Files.list(tempDir)) { + files.forEach(p -> { + try { + Files.copy(p, shardPath.resolve(p.getFileName())); + } catch (IOException e) { + // Ignore + } + }); + } + FileSystemUtils.deleteSubDirectories(tempDir); + indexShard.store().decRef(); + closeShards(indexShard); + } +} diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java index 8e27c9ff9ae1a..20cec90d79e3e 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java @@ -12,30 +12,49 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.util.Version; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.action.ActionListener; import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.InternalEngine; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.replication.CheckpointInfoResponse; +import org.opensearch.indices.replication.GetSegmentFilesResponse; +import 
org.opensearch.indices.replication.RemoteStoreReplicationSource; +import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationState; +import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.CorruptionUtils; import org.hamcrest.MatcherAssert; -import org.junit.Before; +import org.junit.Assert; import java.io.IOException; +import java.nio.channels.FileChannel; import java.nio.file.Path; +import java.nio.file.StandardOpenOption; import java.util.Arrays; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.function.BiConsumer; import java.util.stream.Collectors; import static org.opensearch.index.engine.EngineTestCase.assertAtMostOneLuceneDocumentPerSequenceNumber; +import static org.opensearch.index.shard.RemoteStoreRefreshListener.EXCLUDE_FILES; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class RemoteIndexShardTests extends SegmentReplicationIndexShardTests { @@ -47,14 +66,6 @@ public class RemoteIndexShardTests extends SegmentReplicationIndexShardTests { .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, REPOSITORY_NAME) .build(); - @Before - public void setup() { - // Todo: Remove feature flag once remote store integration with segrep goes GA - FeatureFlags.initializeFeatureFlags( - Settings.builder().put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING.getKey(), "true").build() - ); - } - protected Settings getIndexSettings() { return settings; } @@ -134,10 +145,6 @@ public void testNRTReplicaWithRemoteStorePromotedAsPrimary(boolean performFlushF assertEquals(InternalEngine.class, nextPrimary.getEngine().getClass()); assertDocCounts(nextPrimary, totalDocs, totalDocs); - // As we are downloading segments from remote segment store on failover, there should not be - // any operations replayed from translog - assertEquals(prevOperationCount, nextPrimary.translogStats().estimatedNumberOfOperations()); - // refresh and push segments to our other replica. 
nextPrimary.refresh("test"); @@ -204,11 +211,14 @@ public void testReplicaCommitsInfosBytesOnRecovery() throws Exception { Set.of("segments_3"), primary.remoteStore().readLastCommittedSegmentsInfo().files(true) ); - MatcherAssert.assertThat( - "Segments are referenced in memory only", - primaryEngine.getSegmentInfosSnapshot().get().files(false), - containsInAnyOrder("_0.cfe", "_0.si", "_0.cfs") - ); + + try (final GatedCloseable<SegmentInfos> segmentInfosSnapshot = primaryEngine.getSegmentInfosSnapshot()) { + MatcherAssert.assertThat( + "Segments are referenced in memory only", + segmentInfosSnapshot.get().files(false), + containsInAnyOrder("_0.cfe", "_0.si", "_0.cfs") + ); + } final IndexShard replica = shards.addReplica(remotePath); replica.store().createEmpty(Version.LATEST); @@ -236,13 +246,17 @@ MatcherAssert.assertThat( "Replica commits infos bytes referencing latest refresh point", latestReplicaCommit.files(true), - containsInAnyOrder("_0.cfe", "_0.si", "_0.cfs", "segments_5") - ); - MatcherAssert.assertThat( - "Segments are referenced in memory", - replicaEngine.getSegmentInfosSnapshot().get().files(false), - containsInAnyOrder("_0.cfe", "_0.si", "_0.cfs") + containsInAnyOrder("_0.cfe", "_0.si", "_0.cfs", "segments_6") ); + + try (final GatedCloseable<SegmentInfos> segmentInfosSnapshot = replicaEngine.getSegmentInfosSnapshot()) { + MatcherAssert.assertThat( + "Segments are referenced in memory", + segmentInfosSnapshot.get().files(false), + containsInAnyOrder("_0.cfe", "_0.si", "_0.cfs") + ); + } + final Store.RecoveryDiff recoveryDiff = Store.segmentReplicationDiff( primary.getSegmentMetadataMap(), replica.getSegmentMetadataMap() @@ -294,20 +308,20 @@ public void testRepicaCleansUpOldCommitsWhenReceivingNew() throws Exception { replicateSegments(primary, shards.getReplicas()); assertDocCount(primary, 1); assertDocCount(replica, 1); - assertEquals("segments_4", replica.store().readLastCommittedSegmentsInfo().getSegmentsFileName()); - assertSingleSegmentFile(replica, "segments_4"); + assertEquals("segments_5", replica.store().readLastCommittedSegmentsInfo().getSegmentsFileName()); + assertSingleSegmentFile(replica, "segments_5"); shards.indexDocs(1); primary.refresh("test"); replicateSegments(primary, shards.getReplicas()); assertDocCount(replica, 2); - assertSingleSegmentFile(replica, "segments_4"); + assertSingleSegmentFile(replica, "segments_5"); shards.indexDocs(1); flushShard(primary); replicateSegments(primary, shards.getReplicas()); assertDocCount(replica, 3); - assertSingleSegmentFile(replica, "segments_5"); + assertSingleSegmentFile(replica, "segments_6"); final Store.RecoveryDiff diff = Store.segmentReplicationDiff(primary.getSegmentMetadataMap(), replica.getSegmentMetadataMap()); assertTrue(diff.missing.isEmpty()); @@ -343,6 +357,220 @@ public void testPrimaryRestart() throws Exception { } } + /** + * This test validates that unreferenced on-disk files are ignored while requesting files from the replication source, to + prevent FileAlreadyExistsException. 
It does so by copying files only in the first round of segment replication, without + committing locally, so that in the next round of segment replication those files are not considered for download again. + */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10885") + public void testSegRepSucceedsOnPreviousCopiedFiles() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + shards.indexDocs(10); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + when(sourceFactory.get(any())).thenReturn( + getRemoteStoreReplicationSource(replica, () -> { throw new RuntimeException("Simulated"); }) + ); + CountDownLatch latch = new CountDownLatch(1); + + // Start the first round of segment replication. This should fail with the simulated error, leaving the replica with + // files in its local store but not in the active reader. + final SegmentReplicationTarget target = targetService.startReplication( + replica, + primary.getLatestReplicationCheckpoint(), + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + latch.countDown(); + Assert.fail("Replication should fail with simulated error"); + } + + @Override + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { + latch.countDown(); + assertFalse(sendShardFailure); + logger.error("Replication error", e); + } + } + ); + latch.await(); + Set<String> onDiskFiles = new HashSet<>(Arrays.asList(replica.store().directory().listAll())); + onDiskFiles.removeIf(name -> EXCLUDE_FILES.contains(name) || name.startsWith(IndexFileNames.SEGMENTS)); + List<String> activeFiles = replica.getSegmentMetadataMap() + .values() + .stream() + .map(metadata -> metadata.name()) + .collect(Collectors.toList()); + assertTrue("Files should not be committed", activeFiles.isEmpty()); + assertEquals("Files should be copied to disk", false, onDiskFiles.isEmpty()); + assertEquals(target.state().getStage(), SegmentReplicationState.Stage.GET_FILES); + + // Start the next round of segment replication without throwing an exception, resulting in a commit on the replica + when(sourceFactory.get(any())).thenReturn(getRemoteStoreReplicationSource(replica, () -> {})); + CountDownLatch waitForSecondRound = new CountDownLatch(1); + final SegmentReplicationTarget newTarget = targetService.startReplication( + replica, + primary.getLatestReplicationCheckpoint(), + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + waitForSecondRound.countDown(); + } + + @Override + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { + waitForSecondRound.countDown(); + logger.error("Replication error", e); + Assert.fail("Replication should not fail"); + } + } + ); + waitForSecondRound.await(); + assertEquals(newTarget.state().getStage(), SegmentReplicationState.Stage.DONE); + activeFiles = replica.getSegmentMetadataMap().values().stream().map(metadata -> metadata.name()).collect(Collectors.toList()); + assertTrue("Replica should have consistent 
disk & reader", activeFiles.containsAll(onDiskFiles)); + shards.removeReplica(replica); + closeShards(replica); + } + } + + /** + * This test validates that local non-readable (corrupted or partially copied) on-disk files are deleted rather than failing the + replication event. It mimics local files not referenced by the reader by throwing an exception after the file copy and + blocking the reader update. Once this is done, it corrupts one segment file and verifies via doc counts that the file is + deleted in the next round of segment replication. + */ + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10885") + public void testNoFailuresOnFileReads() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int docCount = 10; + shards.indexDocs(docCount); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + when(sourceFactory.get(any())).thenReturn( + getRemoteStoreReplicationSource(replica, () -> { throw new RuntimeException("Simulated"); }) + ); + CountDownLatch waitOnReplicationCompletion = new CountDownLatch(1); + + // Start the first round of segment replication. This should fail with the simulated error, leaving the replica with + // files in its local store but not in the active reader. + SegmentReplicationTarget segmentReplicationTarget = targetService.startReplication( + replica, + primary.getLatestReplicationCheckpoint(), + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + waitOnReplicationCompletion.countDown(); + Assert.fail("Replication should fail with simulated error"); + } + + @Override + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { + waitOnReplicationCompletion.countDown(); + assertFalse(sendShardFailure); + } + } + ); + waitOnReplicationCompletion.await(); + assertBusy(() -> { assertEquals("Target should be closed", 0, segmentReplicationTarget.refCount()); }); + String fileToCorrupt = null; + // Corrupt one data file + Path shardPath = replica.shardPath().getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + for (String file : replica.store().directory().listAll()) { + if (file.equals("write.lock") || file.startsWith("extra") || file.startsWith("segment")) { + continue; + } + fileToCorrupt = file; + logger.info("--> Corrupting file {}", fileToCorrupt); + try (FileChannel raf = FileChannel.open(shardPath.resolve(file), StandardOpenOption.READ, StandardOpenOption.WRITE)) { + CorruptionUtils.corruptAt(shardPath.resolve(file), raf, (int) (raf.size() - 8)); + } + break; + } + Assert.assertNotNull(fileToCorrupt); + + // Ingest more data and start the next round of segment replication + shards.indexDocs(docCount); + primary.refresh("Post corruption"); + replicateSegments(primary, List.of(replica)); + + assertDocCount(primary, 2 * docCount); + assertDocCount(replica, 2 * docCount); + + final Store.RecoveryDiff diff = Store.segmentReplicationDiff(primary.getSegmentMetadataMap(), replica.getSegmentMetadataMap()); + assertTrue(diff.missing.isEmpty()); + assertTrue(diff.different.isEmpty()); + + // clean up + shards.removeReplica(replica); + 
closeShards(replica); + } + } + + private RemoteStoreReplicationSource getRemoteStoreReplicationSource(IndexShard shard, Runnable postGetFilesRunnable) { + return new RemoteStoreReplicationSource(shard) { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + super.getCheckpointMetadata(replicationId, checkpoint, listener); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + IndexShard indexShard, + BiConsumer fileProgressTracker, + ActionListener listener + ) { + super.getSegmentFiles(replicationId, checkpoint, filesToFetch, indexShard, (fileName, bytesRecovered) -> {}, listener); + postGetFilesRunnable.run(); + } + + @Override + public String getDescription() { + return "TestRemoteStoreReplicationSource"; + } + }; + } + + @Override + protected void validateShardIdleWithNoReplicas(IndexShard primary) { + // ensure search idle conditions are met. + assertFalse(primary.isSearchIdleSupported()); + assertTrue(primary.isSearchIdle()); + assertTrue(primary.scheduledRefresh()); + assertFalse(primary.hasRefreshPending()); + } + private void assertSingleSegmentFile(IndexShard shard, String fileName) throws IOException { final Set segmentsFileNames = Arrays.stream(shard.store().directory().listAll()) .filter(file -> file.startsWith(IndexFileNames.SEGMENTS)) diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 83b07e986bcc5..51814283c5eb3 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -27,7 +27,7 @@ import org.opensearch.index.engine.InternalEngineFactory; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.remote.RemoteSegmentTransferTracker; -import org.opensearch.index.remote.RemoteStorePressureService; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.store.RemoteDirectory; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils; @@ -46,6 +46,7 @@ import java.util.concurrent.atomic.AtomicLong; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.METADATA_FILES_TO_FETCH; import static org.opensearch.test.RemoteStoreTestUtils.createMetadataFileBytes; import static org.opensearch.test.RemoteStoreTestUtils.getDummyMetadata; import static org.mockito.ArgumentMatchers.any; @@ -60,7 +61,7 @@ public class RemoteStoreRefreshListenerTests extends IndexShardTestCase { private IndexShard indexShard; private ClusterService clusterService; private RemoteStoreRefreshListener remoteStoreRefreshListener; - private RemoteStorePressureService remoteStorePressureService; + private RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory; public void setup(boolean primary, int numberOfDocs) throws IOException { indexShard = newStartedShard( @@ -84,9 +85,9 @@ public void setup(boolean primary, int numberOfDocs) throws IOException { new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool ); - remoteStorePressureService = new 
RemoteStorePressureService(clusterService, Settings.EMPTY); - remoteStorePressureService.afterIndexShardCreated(indexShard); - RemoteSegmentTransferTracker tracker = remoteStorePressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); + remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, Settings.EMPTY); + remoteStoreStatsTrackerFactory.afterIndexShardCreated(indexShard); + RemoteSegmentTransferTracker tracker = remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); remoteStoreRefreshListener = new RemoteStoreRefreshListener(indexShard, SegmentReplicationCheckpointPublisher.EMPTY, tracker); } @@ -138,21 +139,23 @@ public void testRemoteDirectoryInitThrowsException() throws IOException { return Collections.singletonList("dummy string"); } throw new IOException(); - }).when(remoteMetadataDirectory).listFilesByPrefixInLexicographicOrder(MetadataFilenameUtils.METADATA_PREFIX, 1); + }).when(remoteMetadataDirectory) + .listFilesByPrefixInLexicographicOrder(MetadataFilenameUtils.METADATA_PREFIX, METADATA_FILES_TO_FETCH); SegmentInfos segmentInfos; try (Store indexShardStore = indexShard.store()) { segmentInfos = indexShardStore.readLastCommittedSegmentsInfo(); } - when(remoteMetadataDirectory.openInput(any(), any())).thenAnswer( + when(remoteMetadataDirectory.getBlobStream(any())).thenAnswer( I -> createMetadataFileBytes(getDummyMetadata("_0", 1), indexShard.getLatestReplicationCheckpoint(), segmentInfos) ); RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( mock(RemoteDirectory.class), remoteMetadataDirectory, mock(RemoteStoreLockManager.class), - mock(ThreadPool.class) + mock(ThreadPool.class), + shardId ); FilterDirectory remoteStoreFilterDirectory = new RemoteStoreRefreshListenerTests.TestFilterDirectory( new RemoteStoreRefreshListenerTests.TestFilterDirectory(remoteSegmentStoreDirectory) @@ -162,10 +165,13 @@ public void testRemoteDirectoryInitThrowsException() throws IOException { // Since the thrown IOException is caught in the constructor, ctor should be invoked successfully. new RemoteStoreRefreshListener(shard, SegmentReplicationCheckpointPublisher.EMPTY, mock(RemoteSegmentTransferTracker.class)); - // Validate that the openInput method of remoteMetadataDirectory has been opened only once and the + // Validate that the stream of metadata file of remoteMetadataDirectory has been opened only once and the // listFilesByPrefixInLexicographicOrder has been called twice. - verify(remoteMetadataDirectory, times(1)).openInput(any(), any()); - verify(remoteMetadataDirectory, times(2)).listFilesByPrefixInLexicographicOrder(MetadataFilenameUtils.METADATA_PREFIX, 1); + verify(remoteMetadataDirectory, times(1)).getBlobStream(any()); + verify(remoteMetadataDirectory, times(2)).listFilesByPrefixInLexicographicOrder( + MetadataFilenameUtils.METADATA_PREFIX, + METADATA_FILES_TO_FETCH + ); } public void testAfterRefresh() throws IOException { @@ -261,6 +267,7 @@ public void testReplica() throws IOException { } } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/9773") public void testReplicaPromotion() throws IOException, InterruptedException { setup(false, 3); remoteStoreRefreshListener.afterRefresh(true); @@ -317,15 +324,15 @@ public void testRefreshSuccessOnFirstAttempt() throws Exception { // We spy on IndexShard.getEngine() to validate that we have successfully hit the terminal code for ascertaining successful upload. 
// Value has been set as 3 as during a successful upload IndexShard.getEngine() is hit thrice and with mockito we are counting down CountDownLatch successLatch = new CountDownLatch(3); - Tuple tuple = mockIndexShardWithRetryAndScheduleRefresh( + Tuple tuple = mockIndexShardWithRetryAndScheduleRefresh( succeedOnAttempt, refreshCountLatch, successLatch ); assertBusy(() -> assertEquals(0, refreshCountLatch.getCount())); assertBusy(() -> assertEquals(0, successLatch.getCount())); - RemoteStorePressureService pressureService = tuple.v2(); - RemoteSegmentTransferTracker segmentTracker = pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); + RemoteStoreStatsTrackerFactory trackerFactory = tuple.v2(); + RemoteSegmentTransferTracker segmentTracker = trackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); assertNoLagAndTotalUploadsFailed(segmentTracker, 0); } @@ -338,15 +345,15 @@ public void testRefreshSuccessOnSecondAttempt() throws Exception { // We spy on IndexShard.getEngine() to validate that we have successfully hit the terminal code for ascertaining successful upload. // Value has been set as 3 as during a successful upload IndexShard.getEngine() is hit thrice and with mockito we are counting down CountDownLatch successLatch = new CountDownLatch(3); - Tuple tuple = mockIndexShardWithRetryAndScheduleRefresh( + Tuple tuple = mockIndexShardWithRetryAndScheduleRefresh( succeedOnAttempt, refreshCountLatch, successLatch ); assertBusy(() -> assertEquals(0, refreshCountLatch.getCount())); assertBusy(() -> assertEquals(0, successLatch.getCount())); - RemoteStorePressureService pressureService = tuple.v2(); - RemoteSegmentTransferTracker segmentTracker = pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); + RemoteStoreStatsTrackerFactory trackerFactory = tuple.v2(); + RemoteSegmentTransferTracker segmentTracker = trackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); assertNoLagAndTotalUploadsFailed(segmentTracker, 1); } @@ -384,15 +391,15 @@ public void testRefreshSuccessOnThirdAttempt() throws Exception { // We spy on IndexShard.getEngine() to validate that we have successfully hit the terminal code for ascertaining successful upload. 
// Value has been set as 3 as during a successful upload IndexShard.getEngine() is hit thrice and with mockito we are counting down CountDownLatch successLatch = new CountDownLatch(3); - Tuple tuple = mockIndexShardWithRetryAndScheduleRefresh( + Tuple tuple = mockIndexShardWithRetryAndScheduleRefresh( succeedOnAttempt, refreshCountLatch, successLatch ); assertBusy(() -> assertEquals(0, refreshCountLatch.getCount())); assertBusy(() -> assertEquals(0, successLatch.getCount())); - RemoteStorePressureService pressureService = tuple.v2(); - RemoteSegmentTransferTracker segmentTracker = pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); + RemoteStoreStatsTrackerFactory trackerFactory = tuple.v2(); + RemoteSegmentTransferTracker segmentTracker = trackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); assertNoLagAndTotalUploadsFailed(segmentTracker, 2); } @@ -406,10 +413,10 @@ private void assertNoLagAndTotalUploadsFailed(RemoteSegmentTransferTracker segme } public void testTrackerData() throws Exception { - Tuple tuple = mockIndexShardWithRetryAndScheduleRefresh(1); + Tuple tuple = mockIndexShardWithRetryAndScheduleRefresh(1); RemoteStoreRefreshListener listener = tuple.v1(); - RemoteStorePressureService pressureService = tuple.v2(); - RemoteSegmentTransferTracker tracker = pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); + RemoteStoreStatsTrackerFactory trackerFactory = tuple.v2(); + RemoteSegmentTransferTracker tracker = trackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); assertNoLag(tracker); indexDocs(100, randomIntBetween(100, 200)); indexShard.refresh("test"); @@ -431,12 +438,13 @@ private void assertNoLag(RemoteSegmentTransferTracker tracker) { assertEquals(0, tracker.getTotalUploadsFailed()); } - private Tuple mockIndexShardWithRetryAndScheduleRefresh(int succeedOnAttempt) - throws IOException { + private Tuple mockIndexShardWithRetryAndScheduleRefresh( + int succeedOnAttempt + ) throws IOException { return mockIndexShardWithRetryAndScheduleRefresh(succeedOnAttempt, null, null); } - private Tuple mockIndexShardWithRetryAndScheduleRefresh( + private Tuple mockIndexShardWithRetryAndScheduleRefresh( int succeedOnAttempt, CountDownLatch refreshCountLatch, CountDownLatch successLatch @@ -445,7 +453,7 @@ private Tuple mockIndexS return mockIndexShardWithRetryAndScheduleRefresh(succeedOnAttempt, refreshCountLatch, successLatch, 1, noOpLatch); } - private Tuple mockIndexShardWithRetryAndScheduleRefresh( + private Tuple mockIndexShardWithRetryAndScheduleRefresh( int succeedOnAttempt, CountDownLatch refreshCountLatch, CountDownLatch successLatch, @@ -508,6 +516,13 @@ private Tuple mockIndexS return indexShard.getSegmentInfosSnapshot(); }).when(shard).getSegmentInfosSnapshot(); + doAnswer((invocation -> { + if (counter.incrementAndGet() <= succeedOnAttempt) { + throw new RuntimeException("Inducing failure in upload"); + } + return indexShard.getLatestReplicationCheckpoint(); + })).when(shard).computeReplicationCheckpoint(any()); + doAnswer(invocation -> { if (Objects.nonNull(successLatch)) { successLatch.countDown(); @@ -532,14 +547,13 @@ private Tuple mockIndexS new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool ); - RemoteStorePressureService remoteStorePressureService = new RemoteStorePressureService(clusterService, Settings.EMPTY); + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory = indexShard.getRemoteStoreStatsTrackerFactory(); 
when(shard.indexSettings()).thenReturn(indexShard.indexSettings()); when(shard.shardId()).thenReturn(indexShard.shardId()); - remoteStorePressureService.afterIndexShardCreated(shard); - RemoteSegmentTransferTracker tracker = remoteStorePressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); + RemoteSegmentTransferTracker tracker = remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); RemoteStoreRefreshListener refreshListener = new RemoteStoreRefreshListener(shard, emptyCheckpointPublisher, tracker); refreshListener.afterRefresh(true); - return Tuple.tuple(refreshListener, remoteStorePressureService); + return Tuple.tuple(refreshListener, remoteStoreStatsTrackerFactory); } public static class TestFilterDirectory extends FilterDirectory { @@ -570,4 +584,5 @@ private void verifyUploadedSegments(RemoteSegmentStoreDirectory remoteSegmentSto } } } + } diff --git a/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java b/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java index 306d11843d7ff..20e263d297e45 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommandTests.java @@ -58,7 +58,7 @@ import org.opensearch.env.TestEnvironment; import org.opensearch.gateway.PersistedClusterStateService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.engine.EngineConfigFactory; import org.opensearch.index.engine.EngineException; import org.opensearch.index.engine.InternalEngineFactory; @@ -133,7 +133,7 @@ public void setup() throws IOException { final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetadata.SETTING_INDEX_UUID, shardId.getIndex().getUUID()) .build(); diff --git a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java index 15fd8b8e6d158..88cb976f2fafb 100644 --- a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java @@ -13,7 +13,6 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.NRTReplicationEngine; @@ -25,7 +24,6 @@ import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.common.ReplicationType; import org.junit.Assert; -import org.junit.Before; import java.io.IOException; import java.nio.file.Path; @@ -43,14 +41,6 @@ public class ReplicaRecoveryWithRemoteTranslogOnPrimaryTests extends OpenSearchI .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "100ms") .build(); - @Before - public void setup() { - // Todo: 
Remove feature flag once remote store integration with segrep goes GA - FeatureFlags.initializeFeatureFlags( - Settings.builder().put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING.getKey(), "true").build() - ); - } - public void testStartSequenceForReplicaRecovery() throws Exception { final Path remoteDir = createTempDir(); final String indexMapping = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": {} }"; diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 807b4a9cd7482..7caff3e5f5479 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -16,6 +16,9 @@ import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; @@ -28,19 +31,26 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CancellableThreads; import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.SegmentReplicationShardStats; +import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.InternalEngineFactory; import org.opensearch.index.engine.NRTReplicationEngine; import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.engine.ReadOnlyEngine; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; import org.opensearch.index.replication.TestReplicationSource; +import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.SnapshotMatchers; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.CheckpointInfoResponse; @@ -57,26 +67,38 @@ import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.repositories.IndexId; +import org.opensearch.snapshots.Snapshot; +import org.opensearch.snapshots.SnapshotId; +import org.opensearch.snapshots.SnapshotInfoTests; +import org.opensearch.snapshots.SnapshotShardsService; +import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import org.junit.Assert; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; 
+import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; import java.util.function.Function; import static org.opensearch.index.engine.EngineTestCase.assertAtMostOneLuceneDocumentPerSequenceNumber; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; @@ -414,13 +436,17 @@ public void testShardIdleWithNoReplicas() throws Exception { shards.startAll(); final IndexShard primary = shards.getPrimary(); shards.indexDocs(randomIntBetween(1, 10)); - // ensure search idle conditions are met. - assertTrue(primary.isSearchIdle()); - assertFalse(primary.scheduledRefresh()); - assertTrue(primary.hasRefreshPending()); + validateShardIdleWithNoReplicas(primary); } } + protected void validateShardIdleWithNoReplicas(IndexShard primary) { + // ensure search idle conditions are met. + assertTrue(primary.isSearchIdle()); + assertFalse(primary.scheduledRefresh()); + assertTrue(primary.hasRefreshPending()); + } + /** * here we are starting a new primary shard in PrimaryMode and testing if the shard publishes checkpoint after refresh. */ @@ -704,6 +730,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { // set the listener, we will only fail it once we cancel the source. 
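The hunks above and below thread a new BiConsumer<String, Long> fileProgressTracker argument through getSegmentFiles; the tests mostly satisfy it with a no-op lambda, (fileName, bytesRecovered) -> {}. As a minimal sketch of a non-trivial tracker a test could pass instead, assuming only standard-library types (the class name FileProgressRecorder is hypothetical, not part of the OpenSearch API):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;
import java.util.function.BiConsumer;

// Records per-file progress reported through the fileProgressTracker hook.
final class FileProgressRecorder implements BiConsumer<String, Long> {
    private final Map<String, LongAdder> bytesPerFile = new ConcurrentHashMap<>();

    @Override
    public void accept(String fileName, Long bytesRecovered) {
        // One LongAdder per segment file; safe under concurrent callbacks.
        bytesPerFile.computeIfAbsent(fileName, k -> new LongAdder()).add(bytesRecovered);
    }

    long totalBytesRecovered() {
        return bytesPerFile.values().stream().mapToLong(LongAdder::sum).sum();
    }
}

A test could hand an instance of this recorder to getSegmentFiles in place of the no-op lambda and assert totalBytesRecovered() against the summed lengths of the fetched files.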
@@ -773,6 +800,212 @@ public void testNoDuplicateSeqNo() throws Exception { } } + public void testQueryDuringEngineResetShowsDocs() throws Exception { + final NRTReplicationEngineFactory engineFactory = new NRTReplicationEngineFactory(); + final NRTReplicationEngineFactory spy = spy(engineFactory); + try (ReplicationGroup shards = createGroup(1, settings, indexMapping, spy, createTempDir())) { + final IndexShard primaryShard = shards.getPrimary(); + final IndexShard replicaShard = shards.getReplicas().get(0); + shards.startAll(); + shards.indexDocs(10); + shards.refresh("test"); + replicateSegments(primaryShard, shards.getReplicas()); + shards.assertAllEqual(10); + + final AtomicReference failed = new AtomicReference<>(); + doAnswer(ans -> { + try { + final Engine engineOrNull = replicaShard.getEngineOrNull(); + assertNotNull(engineOrNull); + assertTrue(engineOrNull instanceof ReadOnlyEngine); + shards.assertAllEqual(10); + } catch (Throwable e) { + failed.set(e); + } + return ans.callRealMethod(); + }).when(spy).newReadWriteEngine(any()); + shards.promoteReplicaToPrimary(replicaShard).get(); + assertNull("Expected correct doc count during engine reset", failed.get()); + } + } + + public void testSegmentReplicationStats() throws Exception { + final NRTReplicationEngineFactory engineFactory = new NRTReplicationEngineFactory(); + final NRTReplicationEngineFactory spy = spy(engineFactory); + try (ReplicationGroup shards = createGroup(1, settings, indexMapping, spy, createTempDir())) { + final IndexShard primaryShard = shards.getPrimary(); + final IndexShard replicaShard = shards.getReplicas().get(0); + shards.startAll(); + + assertReplicaCaughtUp(primaryShard); + + shards.indexDocs(10); + shards.refresh("test"); + + final ReplicationCheckpoint primaryCheckpoint = primaryShard.getLatestReplicationCheckpoint(); + final long initialCheckpointSize = primaryCheckpoint.getMetadataMap() + .values() + .stream() + .mapToLong(StoreFileMetadata::length) + .sum(); + + Set postRefreshStats = primaryShard.getReplicationStatsForTrackedReplicas(); + SegmentReplicationShardStats shardStats = postRefreshStats.stream().findFirst().get(); + assertEquals(1, shardStats.getCheckpointsBehindCount()); + assertEquals(initialCheckpointSize, shardStats.getBytesBehindCount()); + replicateSegments(primaryShard, shards.getReplicas()); + assertReplicaCaughtUp(primaryShard); + shards.assertAllEqual(10); + + final List docIdAndSeqNos = getDocIdAndSeqNos(primaryShard); + for (DocIdSeqNoAndSource docIdAndSeqNo : docIdAndSeqNos.subList(0, 5)) { + deleteDoc(primaryShard, docIdAndSeqNo.getId()); + // delete on replica for xlog. 
+ deleteDoc(replicaShard, docIdAndSeqNo.getId()); + } + primaryShard.forceMerge(new ForceMergeRequest().maxNumSegments(1).flush(true)); + + final Map segmentMetadataMap = primaryShard.getSegmentMetadataMap(); + final Store.RecoveryDiff diff = Store.segmentReplicationDiff(segmentMetadataMap, replicaShard.getSegmentMetadataMap()); + final long sizeAfterDeleteAndCommit = diff.missing.stream().mapToLong(StoreFileMetadata::length).sum(); + + final Set statsAfterFlush = primaryShard.getReplicationStatsForTrackedReplicas(); + shardStats = statsAfterFlush.stream().findFirst().get(); + assertEquals(sizeAfterDeleteAndCommit, shardStats.getBytesBehindCount()); + assertEquals(1, shardStats.getCheckpointsBehindCount()); + + replicateSegments(primaryShard, shards.getReplicas()); + assertReplicaCaughtUp(primaryShard); + shards.assertAllEqual(5); + } + } + + public void testSnapshotWhileFailoverIncomplete() throws Exception { + final NRTReplicationEngineFactory engineFactory = new NRTReplicationEngineFactory(); + final NRTReplicationEngineFactory spy = spy(engineFactory); + try (ReplicationGroup shards = createGroup(1, settings, indexMapping, spy, createTempDir())) { + final IndexShard primaryShard = shards.getPrimary(); + final IndexShard replicaShard = shards.getReplicas().get(0); + shards.startAll(); + shards.indexDocs(10); + shards.refresh("test"); + replicateSegments(primaryShard, shards.getReplicas()); + shards.assertAllEqual(10); + + final SnapshotShardsService shardsService = getSnapshotShardsService(replicaShard); + final Snapshot snapshot = new Snapshot(randomAlphaOfLength(10), new SnapshotId(randomAlphaOfLength(5), randomAlphaOfLength(5))); + + final ClusterState initState = addSnapshotIndex(clusterService.state(), snapshot, replicaShard, SnapshotsInProgress.State.INIT); + shardsService.clusterChanged(new ClusterChangedEvent("test", initState, clusterService.state())); + + CountDownLatch latch = new CountDownLatch(1); + doAnswer(ans -> { + final Engine engineOrNull = replicaShard.getEngineOrNull(); + assertNotNull(engineOrNull); + assertTrue(engineOrNull instanceof ReadOnlyEngine); + shards.assertAllEqual(10); + shardsService.clusterChanged( + new ClusterChangedEvent( + "test", + addSnapshotIndex(clusterService.state(), snapshot, replicaShard, SnapshotsInProgress.State.STARTED), + initState + ) + ); + latch.countDown(); + return ans.callRealMethod(); + }).when(spy).newReadWriteEngine(any()); + shards.promoteReplicaToPrimary(replicaShard).get(); + latch.await(); + assertBusy(() -> { + final IndexShardSnapshotStatus.Copy copy = shardsService.currentSnapshotShards(snapshot).get(replicaShard.shardId).asCopy(); + final IndexShardSnapshotStatus.Stage stage = copy.getStage(); + assertEquals(IndexShardSnapshotStatus.Stage.FAILURE, stage); + assertNotNull(copy.getFailure()); + assertTrue( + copy.getFailure() + .contains("snapshot triggered on a new primary following failover and cannot proceed until promotion is complete") + ); + }); + } + } + + public void testReuseReplicationCheckpointWhenLatestInfosIsUnChanged() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir())) { + final IndexShard primaryShard = shards.getPrimary(); + shards.startAll(); + shards.indexDocs(10); + shards.refresh("test"); + replicateSegments(primaryShard, shards.getReplicas()); + shards.assertAllEqual(10); + final ReplicationCheckpoint latestReplicationCheckpoint = primaryShard.getLatestReplicationCheckpoint(); + try (GatedCloseable 
segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { + assertEquals(latestReplicationCheckpoint, primaryShard.computeReplicationCheckpoint(segmentInfosSnapshot.get())); + } + final Tuple<GatedCloseable<SegmentInfos>, ReplicationCheckpoint> latestSegmentInfosAndCheckpoint = primaryShard + .getLatestSegmentInfosAndCheckpoint(); + try (final GatedCloseable<SegmentInfos> closeable = latestSegmentInfosAndCheckpoint.v1()) { + assertEquals(latestReplicationCheckpoint, primaryShard.computeReplicationCheckpoint(closeable.get())); + } + } + } + + public void testComputeReplicationCheckpointNullInfosReturnsEmptyCheckpoint() throws Exception { + try (ReplicationGroup shards = createGroup(1, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir())) { + final IndexShard primaryShard = shards.getPrimary(); + assertEquals(ReplicationCheckpoint.empty(primaryShard.shardId), primaryShard.computeReplicationCheckpoint(null)); + } + } + + private SnapshotShardsService getSnapshotShardsService(IndexShard replicaShard) { + final TransportService transportService = mock(TransportService.class); + when(transportService.getThreadPool()).thenReturn(threadPool); + final IndicesService indicesService = mock(IndicesService.class); + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexServiceSafe(any())).thenReturn(indexService); + when(indexService.getShardOrNull(anyInt())).thenReturn(replicaShard); + return new SnapshotShardsService(settings, clusterService, createRepositoriesService(), transportService, indicesService); + } + + private ClusterState addSnapshotIndex( + ClusterState state, + Snapshot snapshot, + IndexShard shard, + SnapshotsInProgress.State snapshotState + ) { + final Map<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shardsBuilder = new HashMap<>(); + ShardRouting shardRouting = shard.shardRouting; + shardsBuilder.put( + shardRouting.shardId(), + new SnapshotsInProgress.ShardSnapshotStatus(state.getNodes().getLocalNode().getId(), "1") + ); + final SnapshotsInProgress.Entry entry = new SnapshotsInProgress.Entry( + snapshot, + randomBoolean(), + false, + snapshotState, + Collections.singletonList(new IndexId(index.getName(), index.getUUID())), + Collections.emptyList(), + randomNonNegativeLong(), + randomLong(), + shardsBuilder, + null, + SnapshotInfoTests.randomUserMetadata(), + VersionUtils.randomVersion(random()), + false + ); + return ClusterState.builder(state) + .putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.of(Collections.singletonList(entry))) + .build(); + } + + private void assertReplicaCaughtUp(IndexShard primaryShard) { + Set<SegmentReplicationShardStats> initialStats = primaryShard.getReplicationStatsForTrackedReplicas(); + assertEquals(initialStats.size(), 1); + SegmentReplicationShardStats shardStats = initialStats.stream().findFirst().get(); + assertEquals(0, shardStats.getCheckpointsBehindCount()); + assertEquals(0, shardStats.getBytesBehindCount()); + } + /** * Assert persisted and searchable doc counts. This method should not be used while docs are concurrently indexed because * it asserts point in time seqNos are relative to the doc counts. 
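testSegmentReplicationStats above asserts that a replica's getBytesBehindCount() equals the summed StoreFileMetadata lengths of the files it is missing relative to the primary. A minimal sketch of that arithmetic, assuming plain name-to-length maps in place of the metadata maps (BytesBehindSketch and bytesBehind are hypothetical names, not the production code):

import java.util.Map;

final class BytesBehindSketch {
    // Sum the lengths of primary files absent on the replica; this mirrors the
    // quantity the test derives from StoreFileMetadata::length over the diff.
    static long bytesBehind(Map<String, Long> primaryFiles, Map<String, Long> replicaFiles) {
        long behind = 0L;
        for (Map.Entry<String, Long> entry : primaryFiles.entrySet()) {
            if (replicaFiles.containsKey(entry.getKey()) == false) {
                behind += entry.getValue();
            }
        }
        return behind;
    }
}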
@@ -786,17 +1019,24 @@ protected void assertDocCounts(IndexShard indexShard, int expectedPersistedDocCo } protected void resolveCheckpointInfoResponseListener(ActionListener listener, IndexShard primary) { + final CopyState copyState; try { - final CopyState copyState = new CopyState( + copyState = new CopyState( ReplicationCheckpoint.empty(primary.shardId, primary.getLatestReplicationCheckpoint().getCodec()), primary ); - listener.onResponse( - new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) - ); } catch (IOException e) { logger.error("Unexpected error computing CopyState", e); Assert.fail("Failed to compute copyState"); + throw new UncheckedIOException(e); + } + + try { + listener.onResponse( + new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) + ); + } finally { + copyState.decRef(); } } diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java index c394101697b47..f0950fe5392de 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java @@ -47,6 +47,7 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; @@ -87,6 +88,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { // randomly resolve the listener, indicating the source has resolved. 
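The listener-based tests in these files rely on the same synchronization idiom throughout: a CountDownLatch released from the asynchronous callback, with any failure captured so it can be rethrown on the test thread. A generic sketch of that pattern, assuming a hypothetical ExampleListener in place of SegmentReplicationTargetService.SegmentReplicationListener:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

final class AsyncAssertSketch {
    interface ExampleListener {
        void onDone();
        void onFailure(Exception e);
    }

    // Runs an async operation, waits for its callback, and surfaces failures.
    static void runAndAwait(Consumer<ExampleListener> asyncOp) throws Exception {
        CountDownLatch latch = new CountDownLatch(1);
        AtomicReference<Exception> failure = new AtomicReference<>();
        asyncOp.accept(new ExampleListener() {
            @Override
            public void onDone() {
                latch.countDown();
            }

            @Override
            public void onFailure(Exception e) {
                failure.set(e);
                latch.countDown();
            }
        });
        // Bound the wait so a dropped callback fails the test instead of hanging it.
        if (latch.await(30, TimeUnit.SECONDS) == false) {
            throw new AssertionError("timed out waiting for listener");
        }
        if (failure.get() != null) {
            throw failure.get();
        }
    }
}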
@@ -131,6 +133,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { Assert.fail("Should not be reached"); @@ -176,6 +179,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { Assert.fail("Unreachable"); @@ -223,6 +227,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) {} }; @@ -269,6 +274,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java index b220b0891f11d..9e38e1749d434 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java @@ -8,12 +8,17 @@ package org.opensearch.index.store; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.action.LatchedActionListener; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.support.PlainBlobMetadata; import org.opensearch.core.action.ActionListener; import org.opensearch.test.OpenSearchTestCase; @@ -24,18 +29,24 @@ import java.nio.file.NoSuchFileException; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import java.util.stream.Stream; +import org.mockito.Mockito; + +import static org.opensearch.common.blobstore.BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -58,6 +69,85 @@ public void testListAllEmpty() throws IOException { assertArrayEquals(expectedFileName, actualFileNames); } + public void testCopyFrom() throws IOException, InterruptedException { + AtomicReference postUploadInvoked = new AtomicReference<>(false); + String filename = "_100.si"; + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + Mockito.doAnswer(invocation -> { + ActionListener completionListener = invocation.getArgument(1); + completionListener.onResponse(null); + return null; + }).when(blobContainer).asyncBlobUpload(any(WriteContext.class), any()); + + 
Directory storeDirectory = LuceneTestCase.newDirectory(); + IndexOutput indexOutput = storeDirectory.createOutput(filename, IOContext.DEFAULT); + indexOutput.writeString("Hello World!"); + CodecUtil.writeFooter(indexOutput); + indexOutput.close(); + storeDirectory.sync(List.of(filename)); + + CountDownLatch countDownLatch = new CountDownLatch(1); + RemoteDirectory remoteDirectory = new RemoteDirectory(blobContainer); + remoteDirectory.copyFrom( + storeDirectory, + filename, + filename, + IOContext.READ, + () -> postUploadInvoked.set(true), + new ActionListener<>() { + @Override + public void onResponse(Void t) { + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("Listener responded with exception" + e); + } + } + ); + assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); + assertTrue(postUploadInvoked.get()); + storeDirectory.close(); + } + + public void testCopyFromWithException() throws IOException, InterruptedException { + AtomicReference postUploadInvoked = new AtomicReference<>(false); + String filename = "_100.si"; + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + Mockito.doAnswer(invocation -> { + ActionListener completionListener = invocation.getArgument(1); + completionListener.onResponse(null); + return null; + }).when(blobContainer).asyncBlobUpload(any(WriteContext.class), any()); + + Directory storeDirectory = LuceneTestCase.newDirectory(); + + CountDownLatch countDownLatch = new CountDownLatch(1); + RemoteDirectory remoteDirectory = new RemoteDirectory(blobContainer); + remoteDirectory.copyFrom( + storeDirectory, + filename, + filename, + IOContext.READ, + () -> postUploadInvoked.set(true), + new ActionListener<>() { + @Override + public void onResponse(Void t) { + fail("Listener responded with success"); + } + + @Override + public void onFailure(Exception e) { + countDownLatch.countDown(); + } + } + ); + assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); + assertFalse(postUploadInvoked.get()); + storeDirectory.close(); + } + public void testListAll() throws IOException { Map fileNames = Stream.of("abc", "xyz", "pqr", "lmn", "jkl") .collect(Collectors.toMap(filename -> filename, filename -> new PlainBlobMetadata(filename, 100))); @@ -115,13 +205,29 @@ public void testCreateOutput() { public void testOpenInput() throws IOException { InputStream mockInputStream = mock(InputStream.class); when(blobContainer.readBlob("segment_1")).thenReturn(mockInputStream); - Map fileInfo = new HashMap<>(); - fileInfo.put("segment_1", new PlainBlobMetadata("segment_1", 100)); - when(blobContainer.listBlobsByPrefix("segment_1")).thenReturn(fileInfo); + + BlobMetadata blobMetadata = new PlainBlobMetadata("segment_1", 100); + + when(blobContainer.listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC)).thenReturn(List.of(blobMetadata)); IndexInput indexInput = remoteDirectory.openInput("segment_1", IOContext.DEFAULT); assertTrue(indexInput instanceof RemoteIndexInput); assertEquals(100, indexInput.length()); + verify(blobContainer).listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC); + } + + public void testOpenInputWithLength() throws IOException { + InputStream mockInputStream = mock(InputStream.class); + when(blobContainer.readBlob("segment_1")).thenReturn(mockInputStream); + + BlobMetadata blobMetadata = new PlainBlobMetadata("segment_1", 100); + + when(blobContainer.listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC)).thenReturn(List.of(blobMetadata)); + + IndexInput 
indexInput = remoteDirectory.openInput("segment_1", 100, IOContext.DEFAULT); + assertTrue(indexInput instanceof RemoteIndexInput); + assertEquals(100, indexInput.length()); + verify(blobContainer, times(0)).listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC); } public void testOpenInputIOException() throws IOException { @@ -139,9 +245,8 @@ public void testOpenInputNoSuchFileException() throws IOException { } public void testFileLength() throws IOException { - Map<String, BlobMetadata> fileInfo = new HashMap<>(); - fileInfo.put("segment_1", new PlainBlobMetadata("segment_1", 100)); - when(blobContainer.listBlobsByPrefix("segment_1")).thenReturn(fileInfo); + BlobMetadata blobMetadata = new PlainBlobMetadata("segment_1", 100); + when(blobContainer.listBlobsByPrefixInSortedOrder("segment_1", 1, LEXICOGRAPHIC)).thenReturn(List.of(blobMetadata)); assertEquals(100, remoteDirectory.fileLength("segment_1")); } @@ -157,13 +262,7 @@ public void testListFilesByPrefixInLexicographicOrder() throws IOException { LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); latchedActionListener.onResponse(List.of(new PlainBlobMetadata("metadata_1", 1))); return null; - }).when(blobContainer) - .listBlobsByPrefixInSortedOrder( - eq("metadata"), - eq(1), - eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), - any(ActionListener.class) - ); + }).when(blobContainer).listBlobsByPrefixInSortedOrder(eq("metadata"), eq(1), eq(LEXICOGRAPHIC), any(ActionListener.class)); assertEquals(List.of("metadata_1"), remoteDirectory.listFilesByPrefixInLexicographicOrder("metadata", 1)); } @@ -173,13 +272,7 @@ public void testListFilesByPrefixInLexicographicOrderEmpty() throws IOException { LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); latchedActionListener.onResponse(List.of()); return null; - }).when(blobContainer) - .listBlobsByPrefixInSortedOrder( - eq("metadata"), - eq(1), - eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), - any(ActionListener.class) - ); + }).when(blobContainer).listBlobsByPrefixInSortedOrder(eq("metadata"), eq(1), eq(LEXICOGRAPHIC), any(ActionListener.class)); assertEquals(List.of(), remoteDirectory.listFilesByPrefixInLexicographicOrder("metadata", 1)); } @@ -189,13 +282,7 @@ public void testListFilesByPrefixInLexicographicOrderException() { LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); latchedActionListener.onFailure(new IOException("Error")); return null; - }).when(blobContainer) - .listBlobsByPrefixInSortedOrder( - eq("metadata"), - eq(1), - eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), - any(ActionListener.class) - ); + }).when(blobContainer).listBlobsByPrefixInSortedOrder(eq("metadata"), eq(1), eq(LEXICOGRAPHIC), any(ActionListener.class)); assertThrows(IOException.class, () -> remoteDirectory.listFilesByPrefixInLexicographicOrder("metadata", 1)); } diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java index d7bbe52aa3905..cad5e47531cc6 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java @@ -35,6 +35,7 @@ import org.mockito.ArgumentCaptor; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.METADATA_FILES_TO_FETCH; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static 
org.mockito.Mockito.doAnswer; @@ -78,7 +79,12 @@ public void testNewDirectory() throws IOException { latchedActionListener.onResponse(List.of()); return null; }).when(blobContainer) - .listBlobsByPrefixInSortedOrder(any(), eq(1), eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), any(ActionListener.class)); + .listBlobsByPrefixInSortedOrder( + any(), + eq(METADATA_FILES_TO_FETCH), + eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), + any(ActionListener.class) + ); when(repositoriesService.repository("remote_store_repository")).thenReturn(repository); @@ -93,7 +99,7 @@ public void testNewDirectory() throws IOException { verify(blobContainer).listBlobsByPrefixInSortedOrder( eq(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX), - eq(1), + eq(METADATA_FILES_TO_FETCH), eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), any() ); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 91154e5b77641..36cfd84ff960a 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -8,6 +8,8 @@ package org.opensearch.index.store; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; @@ -22,7 +24,7 @@ import org.apache.lucene.util.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; -import org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.io.VersionedCodecStreamWrapper; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -43,6 +45,7 @@ import org.junit.After; import org.junit.Before; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.nio.file.NoSuchFileException; import java.util.ArrayList; @@ -57,9 +60,12 @@ import org.mockito.Mockito; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.METADATA_FILES_TO_FETCH; +import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; import static org.opensearch.test.RemoteStoreTestUtils.createMetadataFileBytes; import static org.opensearch.test.RemoteStoreTestUtils.getDummyMetadata; import static org.hamcrest.CoreMatchers.is; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; @@ -72,6 +78,7 @@ import static org.mockito.Mockito.when; public class RemoteSegmentStoreDirectoryTests extends IndexShardTestCase { + private static final Logger logger = LogManager.getLogger(RemoteSegmentStoreDirectoryTests.class); private RemoteDirectory remoteDataDirectory; private RemoteDirectory remoteMetadataDirectory; private RemoteStoreMetadataLockManager mdLockManager; @@ -82,9 +89,39 @@ public class RemoteSegmentStoreDirectoryTests extends IndexShardTestCase { private SegmentInfos segmentInfos; private ThreadPool threadPool; - private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 23, 34, 1, 1); - private final 
String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 13, 34, 1, 1); - private final String metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(10, 38, 34, 1, 1); + private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 1, + 1, + "node-1" + ); + + private final String metadataFilenameDup = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 2, + 1, + "node-2" + ); + private final String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 13, + 34, + 1, + 1, + "node-1" + ); + private final String metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 10, + 38, + 34, + 1, + 1, + "node-1" + ); @Before public void setup() throws IOException { @@ -92,13 +129,6 @@ public void setup() throws IOException { remoteMetadataDirectory = mock(RemoteDirectory.class); mdLockManager = mock(RemoteStoreMetadataLockManager.class); threadPool = mock(ThreadPool.class); - - remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( - remoteDataDirectory, - remoteMetadataDirectory, - mdLockManager, - threadPool - ); testUploadTracker = new TestUploadListener(); Settings indexSettings = Settings.builder() @@ -108,11 +138,19 @@ public void setup() throws IOException { ExecutorService executorService = OpenSearchExecutors.newDirectExecutorService(); indexShard = newStartedShard(false, indexSettings, new NRTReplicationEngineFactory()); + remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( + remoteDataDirectory, + remoteMetadataDirectory, + mdLockManager, + threadPool, + indexShard.shardId() + ); try (Store store = indexShard.store()) { segmentInfos = store.readLastCommittedSegmentsInfo(); } when(threadPool.executor(ThreadPool.Names.REMOTE_PURGE)).thenReturn(executorService); + when(threadPool.executor(ThreadPool.Names.REMOTE_RECOVERY)).thenReturn(executorService); } @After @@ -167,15 +205,13 @@ public void testUploadedSegmentMetadataFromStringException() { } public void testGetPrimaryTermGenerationUuid() { - String[] filenameTokens = "abc__9223372036854775795__9223372036854775784__uuid_xyz".split( - RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR - ); + String[] filenameTokens = "abc__9223372036854775795__9223372036854775784__uuid_xyz".split(SEPARATOR); assertEquals(12, RemoteSegmentStoreDirectory.MetadataFilenameUtils.getPrimaryTerm(filenameTokens)); assertEquals(23, RemoteSegmentStoreDirectory.MetadataFilenameUtils.getGeneration(filenameTokens)); } public void testInitException() throws IOException { - when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, 1)).thenThrow( + when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, METADATA_FILES_TO_FETCH)).thenThrow( new IOException("Error") ); @@ -194,6 +230,13 @@ public void testInitNoMetadataFile() throws IOException { assertEquals(Set.of(), actualCache.keySet()); } + public void testInitMultipleMetadataFile() throws IOException { + when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, METADATA_FILES_TO_FETCH)).thenReturn( + List.of(metadataFilename, metadataFilenameDup) + ); + assertThrows(IllegalStateException.class, () -> 
remoteSegmentStoreDirectory.init()); + } + private Map> populateMetadata() throws IOException { List metadataFiles = new ArrayList<>(); @@ -204,7 +247,7 @@ private Map> populateMetadata() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(List.of(metadataFilename)); when( @@ -223,21 +266,21 @@ private Map> populateMetadata() throws IOException { getDummyMetadata("_0", 1) ); - when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenAnswer( + when(remoteMetadataDirectory.getBlobStream(metadataFilename)).thenAnswer( I -> createMetadataFileBytes( metadataFilenameContentMapping.get(metadataFilename), indexShard.getLatestReplicationCheckpoint(), segmentInfos ) ); - when(remoteMetadataDirectory.openInput(metadataFilename2, IOContext.DEFAULT)).thenAnswer( + when(remoteMetadataDirectory.getBlobStream(metadataFilename2)).thenAnswer( I -> createMetadataFileBytes( metadataFilenameContentMapping.get(metadataFilename2), indexShard.getLatestReplicationCheckpoint(), segmentInfos ) ); - when(remoteMetadataDirectory.openInput(metadataFilename3, IOContext.DEFAULT)).thenAnswer( + when(remoteMetadataDirectory.getBlobStream(metadataFilename3)).thenAnswer( I -> createMetadataFileBytes( metadataFilenameContentMapping.get(metadataFilename3), indexShard.getLatestReplicationCheckpoint(), @@ -254,7 +297,7 @@ public void testInit() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(List.of(metadataFilename)); @@ -310,7 +353,7 @@ public void testFileLength() throws IOException { assertEquals(uploadedSegments.get("_0.si").getLength(), remoteSegmentStoreDirectory.fileLength("_0.si")); } - public void testFileLenghtNoSuchFile() throws IOException { + public void testFileLengthNoSuchFile() throws IOException { populateMetadata(); remoteSegmentStoreDirectory.init(); @@ -339,7 +382,7 @@ public void testOpenInput() throws IOException { remoteSegmentStoreDirectory.init(); IndexInput indexInput = mock(IndexInput.class); - when(remoteDataDirectory.openInput(startsWith("_0.si"), eq(IOContext.DEFAULT))).thenReturn(indexInput); + when(remoteDataDirectory.openInput(startsWith("_0.si"), anyLong(), eq(IOContext.DEFAULT))).thenReturn(indexInput); assertEquals(indexInput, remoteSegmentStoreDirectory.openInput("_0.si", IOContext.DEFAULT)); } @@ -352,7 +395,7 @@ public void testOpenInputException() throws IOException { populateMetadata(); remoteSegmentStoreDirectory.init(); - when(remoteDataDirectory.openInput(startsWith("_0.si"), eq(IOContext.DEFAULT))).thenThrow(new IOException("Error")); + when(remoteDataDirectory.openInput(startsWith("_0.si"), anyLong(), eq(IOContext.DEFAULT))).thenThrow(new IOException("Error")); assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.openInput("_0.si", IOContext.DEFAULT)); } @@ -491,7 +534,7 @@ public void testCopyFilesFromMultipart() throws Exception { assertFalse(remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().containsKey(filename)); - VerifyingMultiStreamBlobContainer blobContainer = mock(VerifyingMultiStreamBlobContainer.class); + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); when(remoteDataDirectory.getBlobContainer()).thenReturn(blobContainer); Mockito.doAnswer(invocation -> { ActionListener 
completionListener = invocation.getArgument(1); @@ -517,6 +560,16 @@ public void onFailure(Exception e) {} public void testCopyFilesFromMultipartIOException() throws Exception { String filename = "_100.si"; + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + remoteDataDirectory = new RemoteDirectory(blobContainer); + remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( + remoteDataDirectory, + remoteMetadataDirectory, + mdLockManager, + threadPool, + indexShard.shardId() + ); + populateMetadata(); remoteSegmentStoreDirectory.init(); @@ -528,9 +581,6 @@ public void testCopyFilesFromMultipartIOException() throws Exception { storeDirectory.sync(List.of(filename)); assertFalse(remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().containsKey(filename)); - - VerifyingMultiStreamBlobContainer blobContainer = mock(VerifyingMultiStreamBlobContainer.class); - when(remoteDataDirectory.getBlobContainer()).thenReturn(blobContainer); Mockito.doAnswer(invocation -> { ActionListener completionListener = invocation.getArgument(1); completionListener.onFailure(new Exception("Test exception")); @@ -570,7 +620,7 @@ public void testContainsFile() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -578,7 +628,7 @@ public void testContainsFile() throws IOException { metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234::512::" + Version.LATEST.major); metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345::1024::" + Version.LATEST.major); - when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn( + when(remoteMetadataDirectory.getBlobStream(metadataFilename)).thenReturn( createMetadataFileBytes(metadata, indexShard.getLatestReplicationCheckpoint(), segmentInfos) ); @@ -616,7 +666,8 @@ public void testUploadMetadataEmpty() throws IOException { segmentInfos, storeDirectory, 34L, - indexShard.getLatestReplicationCheckpoint() + indexShard.getLatestReplicationCheckpoint(), + "" ) ); } @@ -634,14 +685,14 @@ public void testUploadMetadataNonEmpty() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); Map> metadataFilenameContentMapping = Map.of( latestMetadataFileName, getDummyMetadata("_0", (int) generation) ); - when(remoteMetadataDirectory.openInput(latestMetadataFileName, IOContext.DEFAULT)).thenReturn( + when(remoteMetadataDirectory.getBlobStream(latestMetadataFileName)).thenReturn( createMetadataFileBytes( metadataFilenameContentMapping.get(latestMetadataFileName), indexShard.getLatestReplicationCheckpoint(), @@ -662,7 +713,8 @@ public void testUploadMetadataNonEmpty() throws IOException { segInfos, storeDirectory, generation, - indexShard.getLatestReplicationCheckpoint() + indexShard.getLatestReplicationCheckpoint(), + "" ); verify(remoteMetadataDirectory).copyFrom( @@ -709,7 +761,8 @@ public void testUploadMetadataMissingSegment() throws IOException { segmentInfos, storeDirectory, 12L, - indexShard.getLatestReplicationCheckpoint() + indexShard.getLatestReplicationCheckpoint(), + "" ) ); verify(indexOutput).close(); @@ -732,7 +785,7 @@ public void testNoMetadataHeaderCorruptIndexException() throws IOException { when( 
remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -744,8 +797,8 @@ public void testNoMetadataHeaderCorruptIndexException() throws IOException { OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput("segment metadata", "metadata output stream", output, 4096); indexOutput.writeMapOfStrings(metadata); indexOutput.close(); - ByteArrayIndexInput byteArrayIndexInput = new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); - when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); + ByteArrayInputStream inputStream = new ByteArrayInputStream(BytesReference.toBytes(output.bytes())); + when(remoteMetadataDirectory.getBlobStream(metadataFilename)).thenReturn(inputStream); assertThrows(CorruptIndexException.class, () -> remoteSegmentStoreDirectory.init()); } @@ -755,7 +808,7 @@ public void testInvalidCodecHeaderCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -769,8 +822,8 @@ public void testInvalidCodecHeaderCorruptIndexException() throws IOException { indexOutput.writeMapOfStrings(metadata); CodecUtil.writeFooter(indexOutput); indexOutput.close(); - ByteArrayIndexInput byteArrayIndexInput = new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); - when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); + ByteArrayInputStream inputStream = new ByteArrayInputStream(BytesReference.toBytes(output.bytes())); + when(remoteMetadataDirectory.getBlobStream(metadataFilename)).thenReturn(inputStream); assertThrows(CorruptIndexException.class, () -> remoteSegmentStoreDirectory.init()); } @@ -780,7 +833,7 @@ public void testHeaderMinVersionCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -794,8 +847,8 @@ public void testHeaderMinVersionCorruptIndexException() throws IOException { indexOutput.writeMapOfStrings(metadata); CodecUtil.writeFooter(indexOutput); indexOutput.close(); - ByteArrayIndexInput byteArrayIndexInput = new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); - when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); + ByteArrayInputStream inputStream = new ByteArrayInputStream(BytesReference.toBytes(output.bytes())); + when(remoteMetadataDirectory.getBlobStream(metadataFilename)).thenReturn(inputStream); assertThrows(IndexFormatTooOldException.class, () -> remoteSegmentStoreDirectory.init()); } @@ -805,7 +858,7 @@ public void testHeaderMaxVersionCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -819,8 +872,8 @@ public void testHeaderMaxVersionCorruptIndexException() throws IOException { indexOutput.writeMapOfStrings(metadata); CodecUtil.writeFooter(indexOutput); indexOutput.close(); - ByteArrayIndexInput byteArrayIndexInput 
= new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); - when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); + ByteArrayInputStream inputStream = new ByteArrayInputStream(BytesReference.toBytes(output.bytes())); + when(remoteMetadataDirectory.getBlobStream(metadataFilename)).thenReturn(inputStream); assertThrows(IndexFormatTooNewException.class, () -> remoteSegmentStoreDirectory.init()); } @@ -830,7 +883,7 @@ public void testIncorrectChecksumCorruptIndexException() throws IOException { when( remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, - 1 + METADATA_FILES_TO_FETCH ) ).thenReturn(metadataFiles); @@ -848,8 +901,8 @@ public void testIncorrectChecksumCorruptIndexException() throws IOException { CodecUtil.writeFooter(indexOutputSpy); indexOutputSpy.close(); - ByteArrayIndexInput byteArrayIndexInput = new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); - when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); + ByteArrayInputStream inputStream = new ByteArrayInputStream(BytesReference.toBytes(output.bytes())); + when(remoteMetadataDirectory.getBlobStream(metadataFilename)).thenReturn(inputStream); assertThrows(CorruptIndexException.class, () -> remoteSegmentStoreDirectory.init()); } @@ -991,17 +1044,21 @@ private void indexDocs(int startDocId, int numberOfDocs) throws IOException { } public void testMetadataFileNameOrder() { - String file1 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 21, 23, 1, 1); - String file2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 38, 1, 1); - String file3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(18, 12, 26, 1, 1); - String file4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 10, 1); - String file5 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 1, 1); - String file6 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 5, 1); + String file1 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 21, 23, 1, 1, ""); + String file2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 38, 1, 1, ""); + String file3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(18, 12, 26, 1, 1, ""); + String file4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 10, 1, ""); + String file5 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 1, 1, ""); + String file6 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 5, 1, ""); List<String> actualList = new ArrayList<>(List.of(file1, file2, file3, file4, file5, file6)); actualList.sort(String::compareTo); assertEquals(List.of(file3, file2, file4, file6, file5, file1), actualList); + + long count = file1.chars().filter(ch -> ch == SEPARATOR.charAt(0)).count(); + // There should not be any stray `_` in the metadata file name, as `_` is used as the separator.
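+ // SEPARATOR is the two-character "__"; the name joins the "metadata" prefix with seven encoded fields, so a well-formed name should contain exactly 14 '_' characters (7 separators x 2).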
+ assertEquals(14, count); } private static class WrapperIndexOutput extends IndexOutput { diff --git a/server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java b/server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java new file mode 100644 index 0000000000000..6d8b3fe4d69fb --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/RemoteStoreFileDownloaderTests.java @@ -0,0 +1,228 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.NIOFSDirectory; +import org.opensearch.OpenSearchTimeoutException; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.EOFException; +import java.io.IOException; +import java.nio.file.NoSuchFileException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +public class RemoteStoreFileDownloaderTests extends OpenSearchTestCase { + + private ThreadPool threadPool; + private Directory source; + private Directory destination; + private Directory secondDestination; + private RemoteStoreFileDownloader fileDownloader; + private Map files = new HashMap<>(); + + @Before + public void setup() throws IOException { + final int streamLimit = randomIntBetween(1, 20); + final RecoverySettings recoverySettings = new RecoverySettings( + Settings.builder().put("indices.recovery.max_concurrent_remote_store_streams", streamLimit).build(), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + threadPool = new TestThreadPool(getTestName()); + source = new NIOFSDirectory(createTempDir()); + destination = new NIOFSDirectory(createTempDir()); + secondDestination = new NIOFSDirectory(createTempDir()); + for (int i = 0; i < 10; i++) { + final String filename = "file_" + i; + final int content = randomInt(); + try (IndexOutput output = source.createOutput(filename, IOContext.DEFAULT)) { + output.writeInt(content); + } + files.put(filename, content); + } + fileDownloader = new RemoteStoreFileDownloader( + ShardId.fromString("[RemoteStoreFileDownloaderTests][0]"), + threadPool, + recoverySettings + ); + } + + @After + public void stopThreadPool() throws Exception { + threadPool.shutdown(); + assertTrue(threadPool.awaitTermination(5, TimeUnit.SECONDS)); + } + + public void testDownload() throws IOException { + 
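+ // Happy path: downloadAsync copies every file in the set from source to destination, and PlainActionFuture#actionGet blocks until the listener completes.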
final PlainActionFuture l = new PlainActionFuture<>(); + fileDownloader.downloadAsync(new CancellableThreads(), source, destination, files.keySet(), l); + l.actionGet(); + assertContent(files, destination); + } + + public void testDownloadWithSecondDestination() throws IOException, InterruptedException { + fileDownloader.download(source, destination, secondDestination, files.keySet(), () -> {}); + assertContent(files, destination); + assertContent(files, secondDestination); + } + + public void testDownloadWithFileCompletionHandler() throws IOException, InterruptedException { + final AtomicInteger counter = new AtomicInteger(0); + fileDownloader.download(source, destination, null, files.keySet(), counter::incrementAndGet); + assertContent(files, destination); + assertEquals(files.size(), counter.get()); + } + + public void testDownloadNonExistentFile() throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + fileDownloader.downloadAsync(new CancellableThreads(), source, destination, Set.of("not real"), new ActionListener<>() { + @Override + public void onResponse(Void unused) {} + + @Override + public void onFailure(Exception e) { + assertEquals(NoSuchFileException.class, e.getClass()); + latch.countDown(); + } + }); + assertTrue(latch.await(10, TimeUnit.SECONDS)); + } + + public void testDownloadExtraNonExistentFile() throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + final List filesWithExtra = new ArrayList<>(files.keySet()); + filesWithExtra.add("not real"); + fileDownloader.downloadAsync(new CancellableThreads(), source, destination, filesWithExtra, new ActionListener<>() { + @Override + public void onResponse(Void unused) {} + + @Override + public void onFailure(Exception e) { + assertEquals(NoSuchFileException.class, e.getClass()); + latch.countDown(); + } + }); + assertTrue(latch.await(10, TimeUnit.SECONDS)); + } + + public void testCancellable() { + final CancellableThreads cancellableThreads = new CancellableThreads(); + final PlainActionFuture blockingListener = new PlainActionFuture<>(); + final Directory blockingDestination = new FilterDirectory(destination) { + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) { + try { + Thread.sleep(60_000); // Will be interrupted + fail("Expected to be interrupted"); + } catch (InterruptedException e) { + throw new RuntimeException("Failed due to interrupt", e); + } + } + }; + fileDownloader.downloadAsync(cancellableThreads, source, blockingDestination, files.keySet(), blockingListener); + assertThrows( + "Expected to timeout due to blocking directory", + OpenSearchTimeoutException.class, + () -> blockingListener.actionGet(TimeValue.timeValueMillis(500)) + ); + cancellableThreads.cancel("test"); + assertThrows( + "Expected to complete with cancellation failure", + CancellableThreads.ExecutionCancelledException.class, + blockingListener::actionGet + ); + } + + public void testBlockingCallCanBeInterrupted() throws Exception { + final Directory blockingDestination = new FilterDirectory(destination) { + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) { + try { + Thread.sleep(60_000); // Will be interrupted + fail("Expected to be interrupted"); + } catch (InterruptedException e) { + throw new RuntimeException("Failed due to interrupt", e); + } + } + }; + final AtomicReference capturedException = new AtomicReference<>(); + final Thread thread = new Thread(() -> { + try { + 
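+ // The synchronous download blocks inside the sleeping copyFrom above; interrupting the thread should surface as the InterruptedException captured and asserted below.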
fileDownloader.download(source, blockingDestination, null, files.keySet(), () -> {}); + } catch (Exception e) { + capturedException.set(e); + } + }); + thread.start(); + thread.interrupt(); + thread.join(); + assertEquals(InterruptedException.class, capturedException.get().getClass()); + } + + public void testIOException() throws IOException, InterruptedException { + final Directory failureDirectory = new FilterDirectory(destination) { + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { + throw new IOException("test"); + } + }; + assertThrows(IOException.class, () -> fileDownloader.download(source, failureDirectory, null, files.keySet(), () -> {})); + + final CountDownLatch latch = new CountDownLatch(1); + fileDownloader.downloadAsync(new CancellableThreads(), source, failureDirectory, files.keySet(), new ActionListener<>() { + @Override + public void onResponse(Void unused) {} + + @Override + public void onFailure(Exception e) { + assertEquals(IOException.class, e.getClass()); + latch.countDown(); + } + }); + assertTrue(latch.await(10, TimeUnit.SECONDS)); + } + + private static void assertContent(Map expected, Directory destination) throws IOException { + // Note that Lucene will randomly write extra files (see org.apache.lucene.tests.mockfile.ExtraFS) + // so we just need to check that all the expected files are present but not that _only_ the expected + // files are present + final Set actualFiles = Set.of(destination.listAll()); + for (String file : expected.keySet()) { + assertTrue(actualFiles.contains(file)); + try (IndexInput input = destination.openInput(file, IOContext.DEFAULT)) { + assertEquals(expected.get(file), Integer.valueOf(input.readInt())); + assertThrows(EOFException.class, input::readByte); + } + } + } +} diff --git a/server/src/test/java/org/opensearch/index/store/StoreTests.java b/server/src/test/java/org/opensearch/index/store/StoreTests.java index 8395b3e8ac08e..d7d326b325cc6 100644 --- a/server/src/test/java/org/opensearch/index/store/StoreTests.java +++ b/server/src/test/java/org/opensearch/index/store/StoreTests.java @@ -84,6 +84,7 @@ import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLease; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.shard.ShardPath; import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; @@ -798,7 +799,7 @@ public void testOnCloseCallback() throws IOException { assertEquals(shardId, theLock.getShardId()); assertEquals(lock, theLock); count.incrementAndGet(); - }); + }, null); assertEquals(count.get(), 0); final int iters = randomIntBetween(1, 10); @@ -809,6 +810,26 @@ public void testOnCloseCallback() throws IOException { assertEquals(count.get(), 1); } + public void testStoreShardPath() { + final ShardId shardId = new ShardId("index", "_na_", 1); + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) + .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(0)) + .build(); + final Path path = createTempDir().resolve(shardId.getIndex().getUUID()).resolve(String.valueOf(shardId.id())); + final ShardPath shardPath = new ShardPath(false, path, path, shardId); + final Store store = new Store( + shardId, + IndexSettingsModule.newIndexSettings("index", settings), + 
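+ // the trailing shardPath argument below is the new Store constructor parameter this test exercises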
StoreTests.newDirectory(random()), + new DummyShardLock(shardId), + Store.OnClose.EMPTY, + shardPath + ); + assertEquals(shardPath, store.shardPath()); + store.close(); + } + public void testStoreStats() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); Settings settings = Settings.builder() diff --git a/server/src/test/java/org/opensearch/index/store/lockmanager/FileLockInfoTests.java b/server/src/test/java/org/opensearch/index/store/lockmanager/FileLockInfoTests.java index f3a2f1859923e..80413d4cb6612 100644 --- a/server/src/test/java/org/opensearch/index/store/lockmanager/FileLockInfoTests.java +++ b/server/src/test/java/org/opensearch/index/store/lockmanager/FileLockInfoTests.java @@ -15,10 +15,24 @@ public class FileLockInfoTests extends OpenSearchTestCase { String testMetadata = "testMetadata"; String testAcquirerId = "testAcquirerId"; + String testAcquirerId2 = "ZxZ4Wh89SXyEPmSYAHrIrQ"; + String testAcquirerId3 = "ZxZ4Wh89SXyEPmSYAHrItS"; + String testMetadata1 = "metadata__9223372036854775806__9223372036854775803__9223372036854775790" + + "__9223372036854775800___Hf3Dbw2QQagfGLlVBOUrg__9223370340398865071__1"; + + String oldLock = testMetadata1 + RemoteStoreLockManagerUtils.PRE_OS210_LOCK_SEPARATOR + testAcquirerId2 + + RemoteStoreLockManagerUtils.PRE_OS210_LOCK_FILE_EXTENSION; + String newLock = testMetadata1 + RemoteStoreLockManagerUtils.SEPARATOR + testAcquirerId3 + + RemoteStoreLockManagerUtils.LOCK_FILE_EXTENSION; public void testGenerateLockName() { FileLockInfo fileLockInfo = FileLockInfo.getLockInfoBuilder().withFileToLock(testMetadata).withAcquirerId(testAcquirerId).build(); assertEquals(fileLockInfo.generateLockName(), FileLockInfo.LockFileUtils.generateLockName(testMetadata, testAcquirerId)); + + // validate that lock generated will be the new version lock + fileLockInfo = FileLockInfo.getLockInfoBuilder().withFileToLock(testMetadata1).withAcquirerId(testAcquirerId3).build(); + assertEquals(fileLockInfo.generateLockName(), newLock); + } public void testGenerateLockNameFailureCase1() { @@ -41,13 +55,33 @@ public void testGetLockPrefixFailureCase() { assertThrows(IllegalArgumentException.class, fileLockInfo::getLockPrefix); } + public void testGetFileToLockNameFromLock() { + assertEquals(testMetadata1, FileLockInfo.LockFileUtils.getFileToLockNameFromLock(oldLock)); + assertEquals(testMetadata1, FileLockInfo.LockFileUtils.getFileToLockNameFromLock(newLock)); + } + + public void testGetAcquirerIdFromLock() { + assertEquals(testAcquirerId2, FileLockInfo.LockFileUtils.getAcquirerIdFromLock(oldLock)); + assertEquals(testAcquirerId3, FileLockInfo.LockFileUtils.getAcquirerIdFromLock(newLock)); + } + public void testGetLocksForAcquirer() throws NoSuchFileException { + String[] locks = new String[] { FileLockInfo.LockFileUtils.generateLockName(testMetadata, testAcquirerId), - FileLockInfo.LockFileUtils.generateLockName(testMetadata, "acquirerId2") }; + FileLockInfo.LockFileUtils.generateLockName(testMetadata, "acquirerId2"), + oldLock, + newLock }; FileLockInfo fileLockInfo = FileLockInfo.getLockInfoBuilder().withAcquirerId(testAcquirerId).build(); - assertEquals(fileLockInfo.getLockForAcquirer(locks), FileLockInfo.LockFileUtils.generateLockName(testMetadata, testAcquirerId)); + + // validate old lock + fileLockInfo = FileLockInfo.getLockInfoBuilder().withAcquirerId(testAcquirerId2).build(); + assertEquals(fileLockInfo.getLockForAcquirer(locks), oldLock); + + // validate new lock + fileLockInfo = 
FileLockInfo.getLockInfoBuilder().withAcquirerId(testAcquirerId3).build(); + assertEquals(fileLockInfo.getLockForAcquirer(locks), newLock); } } diff --git a/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java b/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java index 2de36574064cb..c098d11a3487f 100644 --- a/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/InternalTranslogManagerTests.java @@ -38,7 +38,7 @@ public void testRecoveryFromTranslog() throws IOException { LocalCheckpointTracker tracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); try { translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -68,7 +68,7 @@ public void testRecoveryFromTranslog() throws IOException { translogManager.syncTranslog(); translogManager.close(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -117,7 +117,7 @@ public void testTranslogRollsGeneration() throws IOException { LocalCheckpointTracker tracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); try { translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -147,7 +147,7 @@ public void testTranslogRollsGeneration() throws IOException { translogManager.syncTranslog(); translogManager.close(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -182,7 +182,7 @@ public void testTrimOperationsFromTranslog() throws IOException { LocalCheckpointTracker tracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); try { translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -214,7 +214,7 @@ public void testTrimOperationsFromTranslog() throws IOException { translogManager.close(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), @@ -253,7 +253,7 @@ public void 
testTranslogSync() throws IOException { ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); AtomicReference translogManagerAtomicReference = new AtomicReference<>(); translogManager = new InternalTranslogManager( - new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), + new TranslogConfig(shardId, primaryTranslogDir, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""), primaryTerm, globalCheckpoint::get, createTranslogDeletionPolicy(INDEX_SETTINGS), diff --git a/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java index e11322ab2ac63..f725783c50e52 100644 --- a/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/LocalTranslogTests.java @@ -291,7 +291,7 @@ private TranslogConfig getTranslogConfig(final Path path, final Settings setting ); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings); - return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize); + return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize, ""); } private Location addToTranslogAndList(Translog translog, List list, Translog.Operation op) throws IOException { @@ -519,17 +519,18 @@ public void testStats() throws IOException { builder.startObject(); copy.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - assertThat( - builder.toString(), - equalTo( - "{\"translog\":{\"operations\":4,\"size_in_bytes\":" - + 326 - + ",\"uncommitted_operations\":4,\"uncommitted_size_in_bytes\":" - + 271 - + ",\"earliest_last_modified_age\":" - + stats.getEarliestLastModifiedAge() - + "}}" - ) + assertEquals( + "{\"translog\":{\"operations\":4,\"size_in_bytes\":" + + 326 + + ",\"uncommitted_operations\":4,\"uncommitted_size_in_bytes\":" + + 271 + + ",\"earliest_last_modified_age\":" + + stats.getEarliestLastModifiedAge() + + ",\"remote_store\":{\"upload\":{" + + "\"total_uploads\":{\"started\":0,\"failed\":0,\"succeeded\":0}," + + "\"total_upload_size\":{\"started_bytes\":0,\"failed_bytes\":0,\"succeeded_bytes\":0}" + + "}}}}", + builder.toString() ); } } @@ -1455,7 +1456,8 @@ public void testTranslogWriterCanFlushInAddOrReadCall() throws IOException { temp.getTranslogPath(), temp.getIndexSettings(), temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB) + new ByteSizeValue(1, ByteSizeUnit.KB), + "" ); final Set persistedSeqNos = new HashSet<>(); @@ -1553,7 +1555,8 @@ public void testTranslogWriterFsyncedWithLocalTranslog() throws IOException { temp.getTranslogPath(), temp.getIndexSettings(), temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB) + new ByteSizeValue(1, ByteSizeUnit.KB), + "" ); final Set persistedSeqNos = new HashSet<>(); diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFSTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java similarity index 84% rename from server/src/test/java/org/opensearch/index/translog/RemoteFSTranslogTests.java rename to server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index e13c0be93c6fe..3cb65610fab58 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFSTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -40,10 +40,14 @@ import 
org.opensearch.env.TestEnvironment; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.MissingHistoryOperationsException; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.seqno.LocalCheckpointTracker; import org.opensearch.index.seqno.LocalCheckpointTrackerTests; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.index.translog.transfer.TranslogTransferManager; +import org.opensearch.index.translog.transfer.TranslogTransferMetadata; +import org.opensearch.index.translog.transfer.TranslogUploadFailedException; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -58,12 +62,15 @@ import java.io.Closeable; import java.io.EOFException; +import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; +import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; @@ -90,6 +97,7 @@ import java.util.zip.CheckedInputStream; import static org.opensearch.common.util.BigArrays.NON_RECYCLING_INSTANCE; +import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING; import static org.opensearch.index.translog.RemoteFsTranslog.TRANSLOG; import static org.opensearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder; import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; @@ -97,10 +105,13 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; @LuceneTestCase.SuppressFileSystems("ExtrasFS") - -public class RemoteFSTranslogTests extends OpenSearchTestCase { +public class RemoteFsTranslogTests extends OpenSearchTestCase { protected final ShardId shardId = new ShardId("index", "_na_", 1); @@ -114,6 +125,8 @@ public class RemoteFSTranslogTests extends OpenSearchTestCase { private ThreadPool threadPool; private final static String METADATA_DIR = "metadata"; private final static String DATA_DIR = "data"; + + AtomicInteger writeCalls = new AtomicInteger(); BlobStoreRepository repository; BlobStoreTransferService blobStoreTransferService; @@ -153,13 +166,13 @@ public void tearDown() throws Exception { private RemoteFsTranslog create(Path path) throws IOException { final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); - return create(path, createRepository(), translogUUID); + return create(path, createRepository(), translogUUID, 0); } - private RemoteFsTranslog create(Path path, BlobStoreRepository repository, String translogUUID) throws IOException { + private RemoteFsTranslog create(Path path, BlobStoreRepository repository, String translogUUID, int extraGenToKeep) throws IOException { this.repository = repository; globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - final TranslogConfig translogConfig = 
getTranslogConfig(path); + final TranslogConfig translogConfig = getTranslogConfig(path, extraGenToKeep); final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings()); threadPool = new TestThreadPool(getClass().getName()); blobStoreTransferService = new BlobStoreTransferService(repository.blobStore(), threadPool); @@ -172,12 +185,20 @@ private RemoteFsTranslog create(Path path, BlobStoreRepository repository, Strin getPersistedSeqNoConsumer(), repository, threadPool, - primaryMode::get + primaryMode::get, + new RemoteTranslogTransferTracker(shardId, 10) ); + } + private RemoteFsTranslog create(Path path, BlobStoreRepository repository, String translogUUID) throws IOException { + return create(path, repository, translogUUID, 0); } private TranslogConfig getTranslogConfig(final Path path) { + return getTranslogConfig(path, 0); + } + + private TranslogConfig getTranslogConfig(final Path path, int gensToKeep) { final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) // only randomize between nog age retention and a long one, so failures will have a chance of reproducing @@ -185,6 +206,7 @@ private TranslogConfig getTranslogConfig(final Path path) { .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), randomIntBetween(-1, 2048) + "b") .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.getKey(), gensToKeep) .build(); return getTranslogConfig(path, settings); } @@ -197,7 +219,7 @@ private TranslogConfig getTranslogConfig(final Path path, final Settings setting ); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings); - return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize); + return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize, ""); } private BlobStoreRepository createRepository() { @@ -256,6 +278,43 @@ private Translog.Location addToTranslogAndListAndUpload(Translog translog, List< return loc; } + private static void assertUploadStatsNoFailures(RemoteTranslogTransferTracker statsTracker) { + assertTrue(statsTracker.getUploadBytesStarted() > 0); + assertTrue(statsTracker.getTotalUploadsStarted() > 0); + assertEquals(0, statsTracker.getUploadBytesFailed()); + assertEquals(0, statsTracker.getTotalUploadsFailed()); + assertTrue(statsTracker.getUploadBytesSucceeded() > 0); + assertTrue(statsTracker.getTotalUploadsSucceeded() > 0); + assertTrue(statsTracker.getTotalUploadTimeInMillis() > 0); + assertTrue(statsTracker.getLastSuccessfulUploadTimestamp() > 0); + } + + private static void assertUploadStatsNoUploads(RemoteTranslogTransferTracker statsTracker) { + assertEquals(0, statsTracker.getUploadBytesStarted()); + assertEquals(0, statsTracker.getUploadBytesFailed()); + assertEquals(0, statsTracker.getUploadBytesSucceeded()); + assertEquals(0, statsTracker.getTotalUploadsStarted()); + assertEquals(0, statsTracker.getTotalUploadsFailed()); + assertEquals(0, statsTracker.getTotalUploadsSucceeded()); + assertEquals(0, statsTracker.getTotalUploadTimeInMillis()); + assertEquals(0, statsTracker.getLastSuccessfulUploadTimestamp()); + } + + private static void assertDownloadStatsPopulated(RemoteTranslogTransferTracker statsTracker) { + assertTrue(statsTracker.getDownloadBytesSucceeded() > 0); + 
assertTrue(statsTracker.getTotalDownloadsSucceeded() > 0); + // TODO: Need to simulate a delay for this assertion to avoid flakiness + // assertTrue(statsTracker.getTotalDownloadTimeInMillis() > 0); + assertTrue(statsTracker.getLastSuccessfulDownloadTimestamp() > 0); + } + + private static void assertDownloadStatsNoDownloads(RemoteTranslogTransferTracker statsTracker) { + assertEquals(0, statsTracker.getDownloadBytesSucceeded()); + assertEquals(0, statsTracker.getTotalDownloadsSucceeded()); + assertEquals(0, statsTracker.getTotalDownloadTimeInMillis()); + assertEquals(0, statsTracker.getLastSuccessfulDownloadTimestamp()); + } + public void testUploadWithPrimaryModeFalse() { // Test setup primaryMode.set(false); @@ -269,6 +328,9 @@ public void testUploadWithPrimaryModeFalse() { throw new RuntimeException(e); } assertTrue(translog.syncNeeded()); + RemoteTranslogTransferTracker statsTracker = translog.getRemoteTranslogTracker(); + assertUploadStatsNoUploads(statsTracker); + assertDownloadStatsNoDownloads(statsTracker); } public void testUploadWithPrimaryModeTrue() { @@ -281,6 +343,9 @@ public void testUploadWithPrimaryModeTrue() { throw new RuntimeException(e); } assertFalse(translog.syncNeeded()); + RemoteTranslogTransferTracker statsTracker = translog.getRemoteTranslogTracker(); + assertUploadStatsNoFailures(statsTracker); + assertDownloadStatsNoDownloads(statsTracker); } public void testSimpleOperations() throws IOException { @@ -318,6 +383,111 @@ public void testSimpleOperations() throws IOException { } + private TranslogConfig getConfig(int gensToKeep) { + Path tempDir = createTempDir(); + final TranslogConfig temp = getTranslogConfig(tempDir, gensToKeep); + final TranslogConfig config = new TranslogConfig( + temp.getShardId(), + temp.getTranslogPath(), + temp.getIndexSettings(), + temp.getBigArrays(), + new ByteSizeValue(1, ByteSizeUnit.KB), + "" + ); + return config; + } + + private ChannelFactory getChannelFactory() { + writeCalls = new AtomicInteger(); + final ChannelFactory channelFactory = (file, openOption) -> { + FileChannel delegate = FileChannel.open(file, openOption); + boolean success = false; + try { + // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation + final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); + + final FileChannel channel; + if (isCkpFile) { + channel = delegate; + } else { + channel = new FilterFileChannel(delegate) { + + @Override + public int write(ByteBuffer src) throws IOException { + writeCalls.incrementAndGet(); + return super.write(src); + } + }; + } + success = true; + return channel; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(delegate); + } + } + }; + return channelFactory; + } + + public void testExtraGenToKeep() throws Exception { + TranslogConfig config = getConfig(1); + ChannelFactory channelFactory = getChannelFactory(); + final Set persistedSeqNos = new HashSet<>(); + String translogUUID = Translog.createEmptyTranslog( + config.getTranslogPath(), + SequenceNumbers.NO_OPS_PERFORMED, + shardId, + channelFactory, + primaryTerm.get() + ); + TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(config.getIndexSettings()); + ArrayList ops = new ArrayList<>(); + try ( + RemoteFsTranslog translog = new RemoteFsTranslog( + config, + translogUUID, + deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, + primaryTerm::get, + persistedSeqNos::add, + repository, + threadPool, + () -> Boolean.TRUE, + new 
RemoteTranslogTransferTracker(shardId, 10) + ) { + @Override + ChannelFactory getChannelFactory() { + return channelFactory; + } + } + ) { + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 1, primaryTerm.get(), new byte[] { 1 })); + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("3", 2, primaryTerm.get(), new byte[] { 1 })); + + // expose the new checkpoint (simulating a commit), before we trim the translog + translog.setMinSeqNoToKeep(2); + + // Trims from local + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("4", 3, primaryTerm.get(), new byte[] { 1 })); + + // Trims from remote now + translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + assertEquals( + 6, + blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() + ); + + } + } + public void testReadLocation() throws IOException { ArrayList ops = new ArrayList<>(); ArrayList locs = new ArrayList<>(); @@ -330,6 +500,9 @@ public void testReadLocation() throws IOException { assertEquals(op, translog.readOperation(locs.get(i++))); } assertNull(translog.readOperation(new Translog.Location(100, 0, 0))); + RemoteTranslogTransferTracker statsTracker = translog.getRemoteTranslogTracker(); + assertUploadStatsNoFailures(statsTracker); + assertDownloadStatsNoDownloads(statsTracker); } public void testReadLocationDownload() throws IOException { @@ -338,12 +511,17 @@ public void testReadLocationDownload() throws IOException { locs.add(addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 }))); locs.add(addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 1, primaryTerm.get(), new byte[] { 1 }))); locs.add(addToTranslogAndListAndUpload(translog, ops, new Translog.Index("3", 2, primaryTerm.get(), new byte[] { 1 }))); + translog.sync(); int i = 0; for (Translog.Operation op : ops) { assertEquals(op, translog.readOperation(locs.get(i++))); } + RemoteTranslogTransferTracker statsTracker = translog.getRemoteTranslogTracker(); + assertUploadStatsNoFailures(statsTracker); + assertDownloadStatsNoDownloads(statsTracker); + String translogUUID = translog.translogUUID; try { translog.getDeletionPolicy().assertNoOpenTranslogRefs(); @@ -358,11 +536,16 @@ public void testReadLocationDownload() throws IOException { } // Creating RemoteFsTranslog with the same location - Translog newTranslog = create(translogDir, repository, translogUUID); + RemoteFsTranslog newTranslog = create(translogDir, repository, translogUUID); i = 0; for (Translog.Operation op : ops) { assertEquals(op, newTranslog.readOperation(locs.get(i++))); } + + statsTracker = newTranslog.getRemoteTranslogTracker(); + assertUploadStatsNoUploads(statsTracker); + assertDownloadStatsPopulated(statsTracker); + try { newTranslog.close(); } catch (Exception e) { @@ -552,13 +735,22 @@ public void testSimpleOperationsUpload() throws Exception { // this should now trim as tlog-2 files from remote, but not tlog-3 and tlog-4 addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 2, primaryTerm.get(), new byte[] { 1 })); assertEquals(2, translog.stats().estimatedNumberOfOperations()); + assertBusy(() -> 
assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); translog.setMinSeqNoToKeep(2); - + // this should now trim as tlog-2 files from remote, but not tlog-3 and tlog-4 translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); assertEquals(1, translog.stats().estimatedNumberOfOperations()); - assertBusy(() -> assertEquals(4, translog.allUploaded().size())); + assertBusy(() -> { + assertEquals(4, translog.allUploaded().size()); + assertEquals( + 4, + blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() + ); + }); + } public void testMetadataFileDeletion() throws Exception { @@ -976,21 +1168,25 @@ public void testSyncUpTo() throws IOException { if (randomBoolean()) { translog.sync(); assertFalse("translog has been synced already", translog.ensureSynced(location)); + RemoteTranslogTransferTracker statsTracker = translog.getRemoteTranslogTracker(); + assertUploadStatsNoFailures(statsTracker); + assertDownloadStatsNoDownloads(statsTracker); } } } - public void testSyncUpFailure() throws IOException { + public void testSyncUpLocationFailure() throws IOException { int translogOperations = randomIntBetween(1, 20); int count = 0; fail.failAlways(); ArrayList locations = new ArrayList<>(); + boolean shouldFailAlways = randomBoolean(); for (int op = 0; op < translogOperations; op++) { int seqNo = ++count; final Translog.Location location = translog.add( new Translog.Index("" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))) ); - if (randomBoolean()) { + if (shouldFailAlways) { fail.failAlways(); try { translog.ensureSynced(location); @@ -1016,6 +1212,39 @@ public void testSyncUpFailure() throws IOException { assertFalse("all of the locations should be synced: " + location, translog.ensureSynced(location)); } + RemoteTranslogTransferTracker statsTracker = translog.getRemoteTranslogTracker(); + assertTrue(statsTracker.getUploadBytesStarted() > 0); + assertTrue(statsTracker.getTotalUploadsStarted() > 0); + + if (shouldFailAlways) { + assertTrue(statsTracker.getTotalUploadsFailed() > 0); + } else { + assertEquals(0, statsTracker.getTotalUploadsFailed()); + } + + assertTrue(statsTracker.getTotalUploadsSucceeded() > 0); + assertTrue(statsTracker.getLastSuccessfulUploadTimestamp() > 0); + assertDownloadStatsNoDownloads(statsTracker); + } + + public void testSyncUpAlwaysFailure() throws IOException { + int translogOperations = randomIntBetween(1, 20); + int count = 0; + fail.failAlways(); + for (int op = 0; op < translogOperations; op++) { + translog.add( + new Translog.Index(String.valueOf(op), count, primaryTerm.get(), Integer.toString(count).getBytes(StandardCharsets.UTF_8)) + ); + try { + translog.sync(); + fail("io exception expected"); + } catch (TranslogUploadFailedException e) { + assertTrue("at least one operation pending", translog.syncNeeded()); + } + } + assertTrue(translog.isOpen()); + fail.failNever(); + translog.sync(); } public void testSyncUpToStream() throws IOException { @@ -1048,6 +1277,11 @@ public void testSyncUpToStream() throws IOException { translog.sync(); assertFalse("translog has been synced already", translog.ensureSynced(locations.stream())); } + + RemoteTranslogTransferTracker statsTracker = translog.getRemoteTranslogTracker(); + assertUploadStatsNoFailures(statsTracker); + assertDownloadStatsNoDownloads(statsTracker); + for (Translog.Location location : 
locations) { assertFalse("all of the locations should be synced: " + location, translog.ensureSynced(location)); } @@ -1163,48 +1397,10 @@ public void testTranslogWriter() throws IOException { } public void testTranslogWriterCanFlushInAddOrReadCall() throws IOException { - Path tempDir = createTempDir(); - final TranslogConfig temp = getTranslogConfig(tempDir); - final TranslogConfig config = new TranslogConfig( - temp.getShardId(), - temp.getTranslogPath(), - temp.getIndexSettings(), - temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB) - ); - + final TranslogConfig config = getConfig(1); final Set persistedSeqNos = new HashSet<>(); - final AtomicInteger writeCalls = new AtomicInteger(); - - final ChannelFactory channelFactory = (file, openOption) -> { - FileChannel delegate = FileChannel.open(file, openOption); - boolean success = false; - try { - // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation - final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); - - final FileChannel channel; - if (isCkpFile) { - channel = delegate; - } else { - channel = new FilterFileChannel(delegate) { - - @Override - public int write(ByteBuffer src) throws IOException { - writeCalls.incrementAndGet(); - return super.write(src); - } - }; - } - success = true; - return channel; - } finally { - if (success == false) { - IOUtils.closeWhileHandlingException(delegate); - } - } - }; - + writeCalls = new AtomicInteger(); + final ChannelFactory channelFactory = getChannelFactory(); String translogUUID = Translog.createEmptyTranslog( config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, @@ -1223,7 +1419,8 @@ public int write(ByteBuffer src) throws IOException { persistedSeqNos::add, repository, threadPool, - () -> Boolean.TRUE + () -> Boolean.TRUE, + new RemoteTranslogTransferTracker(shardId, 10) ) { @Override ChannelFactory getChannelFactory() { @@ -1271,7 +1468,8 @@ public void testTranslogWriterFsyncDisabledInRemoteFsTranslog() throws IOExcepti temp.getTranslogPath(), temp.getIndexSettings(), temp.getBigArrays(), - new ByteSizeValue(1, ByteSizeUnit.KB) + new ByteSizeValue(1, ByteSizeUnit.KB), + "" ); final Set persistedSeqNos = new HashSet<>(); @@ -1329,7 +1527,8 @@ public void force(boolean metaData) throws IOException { persistedSeqNos::add, repository, threadPool, - () -> Boolean.TRUE + () -> Boolean.TRUE, + new RemoteTranslogTransferTracker(shardId, 10) ) { @Override ChannelFactory getChannelFactory() { @@ -1384,6 +1583,46 @@ public void testCloseIntoReader() throws IOException { } } + public void testDownloadWithRetries() throws IOException { + long generation = 1, primaryTerm = 1; + Path location = createTempDir(); + TranslogTransferMetadata translogTransferMetadata = new TranslogTransferMetadata(primaryTerm, generation, generation, 1); + Map generationToPrimaryTermMapper = new HashMap<>(); + generationToPrimaryTermMapper.put(String.valueOf(generation), String.valueOf(primaryTerm)); + translogTransferMetadata.setGenerationToPrimaryTermMapper(generationToPrimaryTermMapper); + + TranslogTransferManager mockTransfer = mock(TranslogTransferManager.class); + RemoteTranslogTransferTracker remoteTranslogTransferTracker = mock(RemoteTranslogTransferTracker.class); + when(mockTransfer.readMetadata()).thenReturn(translogTransferMetadata); + when(mockTransfer.getRemoteTranslogTransferTracker()).thenReturn(remoteTranslogTransferTracker); + + // Always File not found + when(mockTransfer.downloadTranslog(any(), any(), 
any())).thenThrow(new NoSuchFileException("File not found")); + TranslogTransferManager finalMockTransfer = mockTransfer; + assertThrows(NoSuchFileException.class, () -> RemoteFsTranslog.download(finalMockTransfer, location, logger)); + + // File not found in first attempt. File found in second attempt. + mockTransfer = mock(TranslogTransferManager.class); + when(mockTransfer.readMetadata()).thenReturn(translogTransferMetadata); + when(mockTransfer.getRemoteTranslogTransferTracker()).thenReturn(remoteTranslogTransferTracker); + String msg = "File not found"; + Exception toThrow = randomBoolean() ? new NoSuchFileException(msg) : new FileNotFoundException(msg); + when(mockTransfer.downloadTranslog(any(), any(), any())).thenThrow(toThrow).thenReturn(true); + + AtomicLong downloadCounter = new AtomicLong(); + doAnswer(invocation -> { + if (downloadCounter.incrementAndGet() <= 1) { + throw new NoSuchFileException("File not found"); + } else if (downloadCounter.get() == 2) { + Files.createFile(location.resolve(Translog.getCommitCheckpointFileName(generation))); + } + return true; + }).when(mockTransfer).downloadTranslog(any(), any(), any()); + + // no exception thrown + RemoteFsTranslog.download(mockTransfer, location, logger); + } + public class ThrowingBlobRepository extends FsRepository { private final Environment environment; diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteTranslogStatsTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteTranslogStatsTests.java new file mode 100644 index 0000000000000..aa390cdba1275 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/translog/RemoteTranslogStatsTests.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.index.translog; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class RemoteTranslogStatsTests extends OpenSearchTestCase { + RemoteTranslogTransferTracker.Stats transferTrackerStats; + RemoteTranslogStats remoteTranslogStats; + + @Override + public void setUp() throws Exception { + super.setUp(); + transferTrackerStats = getRandomTransferTrackerStats(); + remoteTranslogStats = new RemoteTranslogStats(transferTrackerStats); + } + + public void testRemoteTranslogStatsCreationFromTransferTrackerStats() { + assertEquals(transferTrackerStats.totalUploadsStarted, remoteTranslogStats.getTotalUploadsStarted()); + assertEquals(transferTrackerStats.totalUploadsSucceeded, remoteTranslogStats.getTotalUploadsSucceeded()); + assertEquals(transferTrackerStats.totalUploadsFailed, remoteTranslogStats.getTotalUploadsFailed()); + assertEquals(transferTrackerStats.uploadBytesStarted, remoteTranslogStats.getUploadBytesStarted()); + assertEquals(transferTrackerStats.uploadBytesSucceeded, remoteTranslogStats.getUploadBytesSucceeded()); + assertEquals(transferTrackerStats.uploadBytesFailed, remoteTranslogStats.getUploadBytesFailed()); + } + + public void testRemoteTranslogStatsSerialization() throws IOException { + try (BytesStreamOutput out = new BytesStreamOutput()) { + remoteTranslogStats.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + RemoteTranslogStats remoteTranslogStatsFromStream = new RemoteTranslogStats(in); + assertEquals(remoteTranslogStats, remoteTranslogStatsFromStream); + } + } + } + + public void testAdd() { + RemoteTranslogTransferTracker.Stats otherTransferTrackerStats = getRandomTransferTrackerStats(); + RemoteTranslogStats otherRemoteTranslogStats = new RemoteTranslogStats(otherTransferTrackerStats); + + otherRemoteTranslogStats.add(remoteTranslogStats); + + assertEquals( + otherRemoteTranslogStats.getTotalUploadsStarted(), + otherTransferTrackerStats.totalUploadsStarted + remoteTranslogStats.getTotalUploadsStarted() + ); + assertEquals( + otherRemoteTranslogStats.getTotalUploadsSucceeded(), + otherTransferTrackerStats.totalUploadsSucceeded + remoteTranslogStats.getTotalUploadsSucceeded() + ); + assertEquals( + otherRemoteTranslogStats.getTotalUploadsFailed(), + otherTransferTrackerStats.totalUploadsFailed + remoteTranslogStats.getTotalUploadsFailed() + ); + assertEquals( + otherRemoteTranslogStats.getUploadBytesStarted(), + otherTransferTrackerStats.uploadBytesStarted + remoteTranslogStats.getUploadBytesStarted() + ); + assertEquals( + otherRemoteTranslogStats.getUploadBytesSucceeded(), + otherTransferTrackerStats.uploadBytesSucceeded + remoteTranslogStats.getUploadBytesSucceeded() + ); + assertEquals( + otherRemoteTranslogStats.getUploadBytesFailed(), + otherTransferTrackerStats.uploadBytesFailed + remoteTranslogStats.getUploadBytesFailed() + ); + } + + private static RemoteTranslogTransferTracker.Stats getRandomTransferTrackerStats() { + return new RemoteTranslogTransferTracker.Stats( + new ShardId("test-idx", "test-idx", randomIntBetween(1, 10)), + 0L, + randomLongBetween(100, 500), + randomLongBetween(50, 100), + randomLongBetween(100, 200), + randomLongBetween(10000, 50000), + randomLongBetween(5000, 10000), + randomLongBetween(10000, 20000), + 0L, + 0D, + 0D, + 
0D, + 0L, + 0L, + 0L, + 0L, + 0D, + 0D, + 0D + ); + } +} diff --git a/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java b/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java index 43b4d2c9847ab..e17d2770f014a 100644 --- a/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java +++ b/server/src/test/java/org/opensearch/index/translog/TranslogManagerTestCase.java @@ -74,7 +74,7 @@ protected Translog createTranslog(LongSupplier primaryTermSupplier) throws IOExc } protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSupplier) throws IOException { - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE); + TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""); String translogUUID = Translog.createEmptyTranslog( translogPath, SequenceNumbers.NO_OPS_PERFORMED, diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceMockRepositoryTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceMockRepositoryTests.java index e629929a9196a..a806eea381297 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceMockRepositoryTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/BlobStoreTransferServiceMockRepositoryTests.java @@ -9,9 +9,9 @@ package org.opensearch.index.translog.transfer; import org.opensearch.action.LatchedActionListener; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.common.blobstore.VerifyingMultiStreamBlobContainer; import org.opensearch.common.blobstore.stream.write.WriteContext; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.core.action.ActionListener; @@ -60,7 +60,7 @@ public void testUploadBlobs() throws Exception { 0L ); - VerifyingMultiStreamBlobContainer blobContainer = mock(VerifyingMultiStreamBlobContainer.class); + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); Mockito.doAnswer(invocation -> { ActionListener completionListener = invocation.getArgument(1); completionListener.onResponse(null); @@ -107,7 +107,7 @@ public void testUploadBlobsIOException() throws Exception { 0L ); - VerifyingMultiStreamBlobContainer blobContainer = mock(VerifyingMultiStreamBlobContainer.class); + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); doThrow(new IOException()).when(blobContainer).asyncBlobUpload(any(WriteContext.class), any()); when(blobStore.blobContainer(any(BlobPath.class))).thenReturn(blobContainer); @@ -146,7 +146,7 @@ public void testUploadBlobsUploadFutureCompletedExceptionally() throws Exception 0L ); - VerifyingMultiStreamBlobContainer blobContainer = mock(VerifyingMultiStreamBlobContainer.class); + AsyncMultiStreamBlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); Mockito.doAnswer(invocation -> { ActionListener completionListener = invocation.getArgument(1); completionListener.onFailure(new Exception("Test exception")); diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/FileTransferTrackerTests.java 
b/server/src/test/java/org/opensearch/index/translog/transfer/FileTransferTrackerTests.java index 1914790ac58d2..b96ada1f6bbff 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/FileTransferTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/FileTransferTrackerTests.java @@ -9,27 +9,33 @@ package org.opensearch.index.translog.transfer; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import java.util.HashSet; import java.util.List; +import java.util.Set; public class FileTransferTrackerTests extends OpenSearchTestCase { protected final ShardId shardId = new ShardId("index", "_na_", 1); FileTransferTracker fileTransferTracker; + RemoteTranslogTransferTracker remoteTranslogTransferTracker; @Override public void setUp() throws Exception { super.setUp(); + remoteTranslogTransferTracker = new RemoteTranslogTransferTracker(shardId, 20); + fileTransferTracker = new FileTransferTracker(shardId, remoteTranslogTransferTracker); } public void testOnSuccess() throws IOException { - fileTransferTracker = new FileTransferTracker(shardId); Path testFile = createTempFile(); + int fileSize = 128; Files.write(testFile, randomByteArrayOfLength(fileSize), StandardOpenOption.APPEND); try ( FileSnapshot.TransferFileSnapshot transferFileSnapshot = new FileSnapshot.TransferFileSnapshot( @@ -38,11 +44,17 @@ public void testOnSuccess() throws IOException { null ) ) { + Set toUpload = new HashSet<>(2); + toUpload.add(transferFileSnapshot); + fileTransferTracker.recordBytesForFiles(toUpload); + remoteTranslogTransferTracker.addUploadBytesStarted(fileSize); fileTransferTracker.onSuccess(transferFileSnapshot); // idempotent + remoteTranslogTransferTracker.addUploadBytesStarted(fileSize); fileTransferTracker.onSuccess(transferFileSnapshot); assertEquals(fileTransferTracker.allUploaded().size(), 1); try { + remoteTranslogTransferTracker.addUploadBytesStarted(fileSize); fileTransferTracker.onFailure(transferFileSnapshot, new IOException("random exception")); fail("failure after success is invalid"); } catch (IllegalStateException ex) { @@ -52,10 +64,10 @@ public void testOnSuccess() throws IOException { } public void testOnFailure() throws IOException { - fileTransferTracker = new FileTransferTracker(shardId); Path testFile = createTempFile(); Path testFile2 = createTempFile(); - Files.write(testFile, randomByteArrayOfLength(128), StandardOpenOption.APPEND); + int fileSize = 128; + Files.write(testFile, randomByteArrayOfLength(fileSize), StandardOpenOption.APPEND); try ( FileSnapshot.TransferFileSnapshot transferFileSnapshot = new FileSnapshot.TransferFileSnapshot( testFile, @@ -66,30 +78,37 @@ public void testOnFailure() throws IOException { testFile2, randomNonNegativeLong(), null - ) + ); ) { - + Set toUpload = new HashSet<>(2); + toUpload.add(transferFileSnapshot); + toUpload.add(transferFileSnapshot2); + fileTransferTracker.recordBytesForFiles(toUpload); + remoteTranslogTransferTracker.addUploadBytesStarted(fileSize); fileTransferTracker.onFailure(transferFileSnapshot, new IOException("random exception")); fileTransferTracker.onSuccess(transferFileSnapshot2); assertEquals(fileTransferTracker.allUploaded().size(), 1); - + remoteTranslogTransferTracker.addUploadBytesStarted(fileSize); fileTransferTracker.onSuccess(transferFileSnapshot);
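// Note on the bookkeeping contract these tests appear to assume (an inference from the
// calls above, not something the PR states): every terminal onSuccess/onFailure is
// preceded by a matching addUploadBytesStarted(fileSize), so the tracker's started
// bytes always cover its succeeded plus failed bytes.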
assertEquals(fileTransferTracker.allUploaded().size(), 2); } } public void testUploaded() throws IOException { - fileTransferTracker = new FileTransferTracker(shardId); Path testFile = createTempFile(); - Files.write(testFile, randomByteArrayOfLength(128), StandardOpenOption.APPEND); + int fileSize = 128; + Files.write(testFile, randomByteArrayOfLength(fileSize), StandardOpenOption.APPEND); try ( FileSnapshot.TransferFileSnapshot transferFileSnapshot = new FileSnapshot.TransferFileSnapshot( testFile, randomNonNegativeLong(), null ); - ) { + Set toUpload = new HashSet<>(2); + toUpload.add(transferFileSnapshot); + fileTransferTracker.recordBytesForFiles(toUpload); + remoteTranslogTransferTracker.addUploadBytesStarted(fileSize); fileTransferTracker.onSuccess(transferFileSnapshot); String fileName = String.valueOf(testFile.getFileName()); assertTrue(fileTransferTracker.uploaded(fileName)); diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index a49cbe6d5eec4..e34bc078896f9 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -10,15 +10,18 @@ import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.action.LatchedActionListener; +import org.opensearch.common.SetOnce; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.collect.Tuple; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.transfer.FileSnapshot.CheckpointFileSnapshot; import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; @@ -33,15 +36,19 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.LinkedList; import java.util.List; +import java.util.Objects; import java.util.Set; +import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; import org.mockito.Mockito; +import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.anySet; @@ -63,6 +70,12 @@ public class TranslogTransferManagerTests extends OpenSearchTestCase { private long primaryTerm; private long generation; private long minTranslogGeneration; + private RemoteTranslogTransferTracker remoteTranslogTransferTracker; + byte[] tlogBytes; + byte[] ckpBytes; + FileTransferTracker tracker; + TranslogTransferManager translogTransferManager; + long delayForBlobDownload; @Override public void setUp() throws Exception { @@ -75,6 +88,28 @@ public void setUp() throws Exception { remoteBaseTransferPath = new BlobPath().add("base_path"); transferService = 
mock(TransferService.class); threadPool = new TestThreadPool(getClass().getName()); + remoteTranslogTransferTracker = new RemoteTranslogTransferTracker(shardId, 20); + tlogBytes = "Hello Translog".getBytes(StandardCharsets.UTF_8); + ckpBytes = "Hello Checkpoint".getBytes(StandardCharsets.UTF_8); + tracker = new FileTransferTracker(new ShardId("index", "indexUuid", 0), remoteTranslogTransferTracker); + translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath, + tracker, + remoteTranslogTransferTracker + ); + + delayForBlobDownload = 1; + when(transferService.downloadBlob(any(BlobPath.class), eq("translog-23.tlog"))).thenAnswer(invocation -> { + Thread.sleep(delayForBlobDownload); + return new ByteArrayInputStream(tlogBytes); + }); + + when(transferService.downloadBlob(any(BlobPath.class), eq("translog-23.ckp"))).thenAnswer(invocation -> { + Thread.sleep(delayForBlobDownload); + return new ByteArrayInputStream(ckpBytes); + }); } @Override @@ -103,7 +138,10 @@ public void testTransferSnapshot() throws Exception { return null; }).when(transferService).uploadBlobs(anySet(), anyMap(), any(ActionListener.class), any(WritePriority.class)); - FileTransferTracker fileTransferTracker = new FileTransferTracker(new ShardId("index", "indexUUid", 0)) { + FileTransferTracker fileTransferTracker = new FileTransferTracker( + new ShardId("index", "indexUUid", 0), + remoteTranslogTransferTracker + ) { @Override public void onSuccess(TransferFileSnapshot fileSnapshot) { fileTransferSucceeded.incrementAndGet(); @@ -122,7 +160,8 @@ public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) { shardId, transferService, remoteBaseTransferPath, - fileTransferTracker + fileTransferTracker, + remoteTranslogTransferTracker ); assertTrue(translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { @@ -143,6 +182,93 @@ public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { assertEquals(4, fileTransferTracker.allUploaded().size()); } + public void testTransferSnapshotOnUploadTimeout() throws Exception { + doAnswer(invocationOnMock -> { + Thread.sleep(31 * 1000); + return null; + }).when(transferService).uploadBlobs(anySet(), anyMap(), any(ActionListener.class), any(WritePriority.class)); + FileTransferTracker fileTransferTracker = new FileTransferTracker( + new ShardId("index", "indexUUid", 0), + remoteTranslogTransferTracker + ); + TranslogTransferManager translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath, + fileTransferTracker, + remoteTranslogTransferTracker + ); + SetOnce exception = new SetOnce<>(); + translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { + @Override + public void onUploadComplete(TransferSnapshot transferSnapshot) {} + + @Override + public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { + exception.set(ex); + } + }); + assertNotNull(exception.get()); + assertTrue(exception.get() instanceof TranslogUploadFailedException); + assertEquals("Timed out waiting for transfer of snapshot test-to-string to complete", exception.get().getMessage()); + } + + public void testTransferSnapshotOnThreadInterrupt() throws Exception { + SetOnce uploadThread = new SetOnce<>(); + doAnswer(invocationOnMock -> { + uploadThread.set(new Thread(() -> { + ActionListener listener = invocationOnMock.getArgument(2); + try { + Thread.sleep(31 * 1000); + } catch (InterruptedException 
ignore) { + List list = new ArrayList<>(invocationOnMock.getArgument(0)); + listener.onFailure(new FileTransferException(list.get(0), ignore)); + } + })); + uploadThread.get().start(); + return null; + }).when(transferService).uploadBlobs(anySet(), anyMap(), any(ActionListener.class), any(WritePriority.class)); + FileTransferTracker fileTransferTracker = new FileTransferTracker( + new ShardId("index", "indexUUid", 0), + remoteTranslogTransferTracker + ); + TranslogTransferManager translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath, + fileTransferTracker, + remoteTranslogTransferTracker + ); + SetOnce exception = new SetOnce<>(); + + Thread thread = new Thread(() -> { + try { + translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { + @Override + public void onUploadComplete(TransferSnapshot transferSnapshot) {} + + @Override + public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { + exception.set(ex); + } + }); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + thread.start(); + + Thread.sleep(1000); + // Interrupt the thread + thread.interrupt(); + assertBusy(() -> { + assertNotNull(exception.get()); + assertTrue(exception.get() instanceof TranslogUploadFailedException); + assertEquals("Failed to upload test-to-string", exception.get().getMessage()); + }); + uploadThread.get().interrupt(); + } + private TransferSnapshot createTransferSnapshot() { return new TransferSnapshot() { @Override @@ -195,6 +321,11 @@ public Set getTranslogFileSnapshots() { public TranslogTransferMetadata getTranslogTransferMetadata() { return new TranslogTransferMetadata(primaryTerm, generation, minTranslogGeneration, randomInt(5)); } + + @Override + public String toString() { + return "test-to-string"; + } }; } @@ -203,7 +334,8 @@ public void testReadMetadataNoFile() throws IOException { shardId, transferService, remoteBaseTransferPath, - null + null, + remoteTranslogTransferTracker ); doAnswer(invocation -> { LatchedActionListener> latchedActionListener = invocation.getArgument(3); @@ -214,33 +346,44 @@ .listAllInSortedOrder(any(BlobPath.class), eq(TranslogTransferMetadata.METADATA_PREFIX), anyInt(), any(ActionListener.class)); assertNull(translogTransferManager.readMetadata()); + assertNoDownloadStats(false); } - // This should happen most of the time - Just a single metadata file - public void testReadMetadataSingleFile() throws IOException { + // This should happen most of the time - the latest of the listed metadata files is read + public void testReadMetadataFile() throws IOException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, remoteBaseTransferPath, - null + null, + remoteTranslogTransferTracker ); - TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2); - String mdFilename = tm.getFileName(); + TranslogTransferMetadata metadata1 = new TranslogTransferMetadata(1, 1, 1, 2); + String mdFilename1 = metadata1.getFileName(); + + TranslogTransferMetadata metadata2 = new TranslogTransferMetadata(1, 0, 1, 2); + String mdFilename2 = metadata2.getFileName(); doAnswer(invocation -> { LatchedActionListener> latchedActionListener = invocation.getArgument(3); List bmList = new LinkedList<>(); - bmList.add(new PlainBlobMetadata(mdFilename, 1)); + bmList.add(new PlainBlobMetadata(mdFilename1, 1)); + bmList.add(new PlainBlobMetadata(mdFilename2, 1));
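// The stub hands the listing back already ordered, mirroring listAllInSortedOrder;
// readMetadata is then expected to pick the head of the list (mdFilename1, generation 1)
// over mdFilename2 (generation 0). Newest-first ordering is an assumption inferred
// from the assertion below, not something this hunk states.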
latchedActionListener.onResponse(bmList); return null; }).when(transferService) .listAllInSortedOrder(any(BlobPath.class), eq(TranslogTransferMetadata.METADATA_PREFIX), anyInt(), any(ActionListener.class)); TranslogTransferMetadata metadata = createTransferSnapshot().getTranslogTransferMetadata(); - when(transferService.downloadBlob(any(BlobPath.class), eq(mdFilename))).thenReturn( - new ByteArrayInputStream(translogTransferManager.getMetadataBytes(metadata)) - ); + long delayForMdDownload = 1; + when(transferService.downloadBlob(any(BlobPath.class), eq(mdFilename1))).thenAnswer(invocation -> { + Thread.sleep(delayForMdDownload); + return new ByteArrayInputStream(translogTransferManager.getMetadataBytes(metadata)); + }); assertEquals(metadata, translogTransferManager.readMetadata()); + + assertEquals(translogTransferManager.getMetadataBytes(metadata).length, remoteTranslogTransferTracker.getDownloadBytesSucceeded()); + assertTrue(remoteTranslogTransferTracker.getTotalDownloadTimeInMillis() >= delayForMdDownload); } public void testReadMetadataReadException() throws IOException { @@ -248,9 +391,9 @@ public void testReadMetadataReadException() throws IOException { shardId, transferService, remoteBaseTransferPath, - null + null, + remoteTranslogTransferTracker ); - TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2); String mdFilename = tm.getFileName(); @@ -266,6 +409,7 @@ public void testReadMetadataReadException() throws IOException { when(transferService.downloadBlob(any(BlobPath.class), eq(mdFilename))).thenThrow(new IOException("Something went wrong")); assertThrows(IOException.class, translogTransferManager::readMetadata); + assertNoDownloadStats(true); } public void testMetadataFileNameOrder() throws IOException { @@ -283,7 +427,8 @@ public void testReadMetadataListException() throws IOException { shardId, transferService, remoteBaseTransferPath, - null + null, + remoteTranslogTransferTracker ); doAnswer(invocation -> { @@ -296,81 +441,39 @@ public void testReadMetadataListException() throws IOException { when(transferService.downloadBlob(any(BlobPath.class), any(String.class))).thenThrow(new IOException("Something went wrong")); assertThrows(IOException.class, translogTransferManager::readMetadata); + assertNoDownloadStats(false); } public void testDownloadTranslog() throws IOException { Path location = createTempDir(); - TranslogTransferManager translogTransferManager = new TranslogTransferManager( - shardId, - transferService, - remoteBaseTransferPath, - new FileTransferTracker(new ShardId("index", "indexUuid", 0)) - ); - - when(transferService.downloadBlob(any(BlobPath.class), eq("translog-23.tlog"))).thenReturn( - new ByteArrayInputStream("Hello Translog".getBytes(StandardCharsets.UTF_8)) - ); - - when(transferService.downloadBlob(any(BlobPath.class), eq("translog-23.ckp"))).thenReturn( - new ByteArrayInputStream("Hello Checkpoint".getBytes(StandardCharsets.UTF_8)) - ); - assertFalse(Files.exists(location.resolve("translog-23.tlog"))); assertFalse(Files.exists(location.resolve("translog-23.ckp"))); translogTransferManager.downloadTranslog("12", "23", location); assertTrue(Files.exists(location.resolve("translog-23.tlog"))); assertTrue(Files.exists(location.resolve("translog-23.ckp"))); + assertTlogCkpDownloadStats(); } public void testDownloadTranslogAlreadyExists() throws IOException { - FileTransferTracker tracker = new FileTransferTracker(new ShardId("index", "indexUuid", 0)); Path location = createTempDir(); 
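// The shared blob-download stubs now live in setUp(); the translog-23 files created
// below therefore already exist locally, and the verify(...) calls that follow assert
// that downloadTranslog still fetches fresh copies from the remote store (an inference
// from the test name and assertions).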
Files.createFile(location.resolve("translog-23.tlog")); Files.createFile(location.resolve("translog-23.ckp")); - TranslogTransferManager translogTransferManager = new TranslogTransferManager( - shardId, - transferService, - remoteBaseTransferPath, - tracker - ); - - when(transferService.downloadBlob(any(BlobPath.class), eq("translog-23.tlog"))).thenReturn( - new ByteArrayInputStream("Hello Translog".getBytes(StandardCharsets.UTF_8)) - ); - when(transferService.downloadBlob(any(BlobPath.class), eq("translog-23.ckp"))).thenReturn( - new ByteArrayInputStream("Hello Checkpoint".getBytes(StandardCharsets.UTF_8)) - ); - translogTransferManager.downloadTranslog("12", "23", location); verify(transferService).downloadBlob(any(BlobPath.class), eq("translog-23.tlog")); verify(transferService).downloadBlob(any(BlobPath.class), eq("translog-23.ckp")); assertTrue(Files.exists(location.resolve("translog-23.tlog"))); assertTrue(Files.exists(location.resolve("translog-23.ckp"))); + assertTlogCkpDownloadStats(); } public void testDownloadTranslogWithTrackerUpdated() throws IOException { - FileTransferTracker tracker = new FileTransferTracker(new ShardId("index", "indexUuid", 0)); Path location = createTempDir(); String translogFile = "translog-23.tlog", checkpointFile = "translog-23.ckp"; Files.createFile(location.resolve(translogFile)); Files.createFile(location.resolve(checkpointFile)); - TranslogTransferManager translogTransferManager = new TranslogTransferManager( - shardId, - transferService, - remoteBaseTransferPath, - tracker - ); - - when(transferService.downloadBlob(any(BlobPath.class), eq(translogFile))).thenReturn( - new ByteArrayInputStream("Hello Translog".getBytes(StandardCharsets.UTF_8)) - ); - when(transferService.downloadBlob(any(BlobPath.class), eq(checkpointFile))).thenReturn( - new ByteArrayInputStream("Hello Checkpoint".getBytes(StandardCharsets.UTF_8)) - ); - translogTransferManager.downloadTranslog("12", "23", location); verify(transferService).downloadBlob(any(BlobPath.class), eq(translogFile)); @@ -385,10 +488,10 @@ public void testDownloadTranslogWithTrackerUpdated() throws IOException { // Since the tracker already holds the files with success state, adding them with success state is allowed tracker.add(translogFile, true); tracker.add(checkpointFile, true); + assertTlogCkpDownloadStats(); } public void testDeleteTranslogSuccess() throws Exception { - FileTransferTracker tracker = new FileTransferTracker(new ShardId("index", "indexUuid", 0)); BlobStore blobStore = mock(BlobStore.class); BlobContainer blobContainer = mock(BlobContainer.class); when(blobStore.blobContainer(any(BlobPath.class))).thenReturn(blobContainer); @@ -397,7 +500,8 @@ public void testDeleteTranslogSuccess() throws Exception { shardId, blobStoreTransferService, remoteBaseTransferPath, - tracker + tracker, + remoteTranslogTransferTracker ); String translogFile = "translog-19.tlog", checkpointFile = "translog-19.ckp"; tracker.add(translogFile, true); @@ -415,7 +519,8 @@ public void testDeleteStaleTranslogMetadata() { shardId, transferService, remoteBaseTransferPath, - null + null, + remoteTranslogTransferTracker ); String tm1 = new TranslogTransferMetadata(1, 1, 1, 2).getFileName(); String tm2 = new TranslogTransferMetadata(1, 2, 1, 2).getFileName(); @@ -455,7 +560,7 @@ public void testDeleteStaleTranslogMetadata() { } public void testDeleteTranslogFailure() throws Exception { - FileTransferTracker tracker = new FileTransferTracker(new ShardId("index", "indexUuid", 0)); + FileTransferTracker tracker = new 
FileTransferTracker(new ShardId("index", "indexUuid", 0), remoteTranslogTransferTracker); BlobStore blobStore = mock(BlobStore.class); BlobContainer blobContainer = mock(BlobContainer.class); doAnswer(invocation -> { throw new IOException("test exception"); }).when(blobStore).blobContainer(any(BlobPath.class)); @@ -465,7 +570,8 @@ public void testDeleteTranslogFailure() throws Exception { shardId, blobStoreTransferService, remoteBaseTransferPath, - tracker + tracker, + remoteTranslogTransferTracker ); String translogFile = "translog-19.tlog", checkpointFile = "translog-19.ckp"; tracker.add(translogFile, true); @@ -475,4 +581,60 @@ public void testDeleteTranslogFailure() throws Exception { translogTransferManager.deleteGenerationAsync(primaryTerm, Set.of(19L), () -> {}); assertEquals(2, tracker.allUploaded().size()); } + + private void assertNoDownloadStats(boolean nonZeroDownloadTime) { + assertEquals(0, remoteTranslogTransferTracker.getDownloadBytesSucceeded()); + assertEquals(0, remoteTranslogTransferTracker.getTotalDownloadsSucceeded()); + assertEquals(0, remoteTranslogTransferTracker.getLastSuccessfulDownloadTimestamp()); + if (nonZeroDownloadTime) { + assertNotEquals(0, remoteTranslogTransferTracker.getTotalDownloadTimeInMillis()); + } else { + assertEquals(0, remoteTranslogTransferTracker.getTotalDownloadTimeInMillis()); + } + } + + private void assertTlogCkpDownloadStats() { + assertEquals(tlogBytes.length + ckpBytes.length, remoteTranslogTransferTracker.getDownloadBytesSucceeded()); + // Expect delay for both tlog and ckp file + assertTrue(remoteTranslogTransferTracker.getTotalDownloadTimeInMillis() >= 2 * delayForBlobDownload); + } + + public void testGetPrimaryTermAndGeneration() { + String nodeId = UUID.randomUUID().toString(); + String tm = new TranslogTransferMetadata(1, 2, 1, 2, nodeId).getFileName(); + Tuple, String> actualOutput = TranslogTransferMetadata.getNodeIdByPrimaryTermAndGeneration(tm); + assertEquals(1L, (long) (actualOutput.v1().v1())); + assertEquals(2L, (long) (actualOutput.v1().v2())); + assertEquals(String.valueOf(Objects.hash(nodeId)), actualOutput.v2()); + } + + public void testMetadataConflict() throws InterruptedException { + TranslogTransferManager translogTransferManager = new TranslogTransferManager( + shardId, + transferService, + remoteBaseTransferPath, + null, + remoteTranslogTransferTracker + ); + TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2, "node--1"); + String mdFilename = tm.getFileName(); + long count = mdFilename.chars().filter(ch -> ch == METADATA_SEPARATOR.charAt(0)).count(); + // There should not be any extra `_` in the metadata file name, since `_` is used as the separator.
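// Assuming METADATA_SEPARATOR is the two-character "__", the separators between the
// file-name components account for all ten '_' characters counted above; a stray '_'
// leaking in from a field such as an unhashed node id would change the count and
// break parsing. The separator width and component layout are assumptions here.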
+ assertEquals(10, count); + Thread.sleep(1); + TranslogTransferMetadata tm2 = new TranslogTransferMetadata(1, 1, 1, 2, "node--2"); + String mdFilename2 = tm2.getFileName(); + + doAnswer(invocation -> { + LatchedActionListener> latchedActionListener = invocation.getArgument(3); + List bmList = new LinkedList<>(); + bmList.add(new PlainBlobMetadata(mdFilename, 1)); + bmList.add(new PlainBlobMetadata(mdFilename2, 1)); + latchedActionListener.onResponse(bmList); + return null; + }).when(transferService) + .listAllInSortedOrder(any(BlobPath.class), eq(TranslogTransferMetadata.METADATA_PREFIX), anyInt(), any(ActionListener.class)); + + assertThrows(RuntimeException.class, translogTransferManager::readMetadata); + } } diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index 4ef7b43284ed6..d679a3cc10ba4 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -628,4 +628,9 @@ public void testConflictingEngineFactories() { ".*multiple engine factories provided for \\[foobar/.*\\]: \\[.*FooEngineFactory\\],\\[.*BarEngineFactory\\].*"; assertThat(e, hasToString(new RegexMatcher(pattern))); } + + public void testClusterRemoteTranslogBufferIntervalDefault() { + IndicesService indicesService = getIndicesService(); + assertEquals(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, indicesService.getClusterRemoteTranslogBufferInterval()); + } } diff --git a/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java b/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java index 9be45d4e77940..6f36d22b7e17b 100644 --- a/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java +++ b/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java @@ -32,6 +32,8 @@ package org.opensearch.indices; +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.search.SearchRequestStats; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.test.OpenSearchTestCase; @@ -43,7 +45,9 @@ public class NodeIndicesStatsTests extends OpenSearchTestCase { public void testInvalidLevel() { - final NodeIndicesStats stats = new NodeIndicesStats(null, Collections.emptyMap()); + CommonStats oldStats = new CommonStats(); + SearchRequestStats requestStats = new SearchRequestStats(); + final NodeIndicesStats stats = new NodeIndicesStats(oldStats, Collections.emptyMap(), requestStats); final String level = randomAlphaOfLength(16); final ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("level", level)); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> stats.toXContent(null, params)); diff --git a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 405903c005a84..c455101ff4549 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -46,7 +46,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.remote.RemoteStorePressureService; +import 
org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; @@ -264,7 +264,7 @@ public MockIndexShard createShard( final RetentionLeaseSyncer retentionLeaseSyncer, final DiscoveryNode targetNode, final DiscoveryNode sourceNode, - final RemoteStorePressureService remoteStorePressureService + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory ) throws IOException { failRandomly(); RecoveryState recoveryState = new RecoveryState(shardRouting, targetNode, sourceNode); diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index 80fca08733fd7..82f34544f5fb5 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -91,6 +91,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.CheckedFunction; import org.opensearch.common.Priority; +import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; @@ -107,7 +108,10 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndices; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.snapshots.EmptySnapshotsInfoService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.gateway.TestGatewayAllocator; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; @@ -150,6 +154,8 @@ public class ClusterStateChanges { private final TransportUpdateSettingsAction transportUpdateSettingsAction; private final TransportClusterRerouteAction transportClusterRerouteAction; private final TransportCreateIndexAction transportCreateIndexAction; + private final RepositoriesService repositoriesService; + private final RemoteStoreNodeService remoteStoreNodeService; private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; private final JoinTaskExecutor joinTaskExecutor; @@ -232,7 +238,8 @@ public ClusterStateChanges(NamedXContentRegistry xContentRegistry, ThreadPool th TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(SETTINGS, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), clusterSettings, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); MetadataIndexUpgradeService metadataIndexUpgradeService = new MetadataIndexUpgradeService( SETTINGS, @@ -362,8 +369,26 @@ public IndexMetadata upgradeIndexMetadata(IndexMetadata indexMetadata, Version m indexNameExpressionResolver ); + repositoriesService = new RepositoriesService( + Settings.EMPTY, + clusterService, + transportService, + Collections.emptyMap(), + Collections.emptyMap(), + threadPool + ); + + remoteStoreNodeService = new RemoteStoreNodeService(new SetOnce<>(repositoriesService)::get, threadPool); + nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, logger); - joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, (s, p, r) -> {}, transportService); + joinTaskExecutor = new JoinTaskExecutor( + 
Settings.EMPTY, + allocationService, + logger, + (s, p, r) -> {}, + transportService, + remoteStoreNodeService + ); } public ClusterState createIndex(ClusterState state, CreateIndexRequest request) { diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 0b291b3cf34b3..6ba49e74196ee 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -70,6 +70,7 @@ import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.repositories.RepositoriesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; @@ -547,7 +548,8 @@ private IndicesClusterStateService createIndicesClusterStateService( TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); final ClusterService clusterService = mock(ClusterService.class); final RepositoriesService repositoriesService = new RepositoriesService( diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java index 11d916616578d..ad90255a3cc3f 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java @@ -54,7 +54,7 @@ import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; +import org.opensearch.index.MergePolicyProvider; import org.opensearch.index.VersionType; import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.Engine; @@ -168,7 +168,7 @@ public void testRecoveryWithOutOfOrderDeleteWithSoftDeletes() throws Exception { .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10) // If soft-deletes is enabled, delete#1 will be reclaimed because its segment (segment_1) is fully deleted // index#0 will be retained if merge is disabled; otherwise it will be reclaimed because gcp=3 and retained_ops=0 - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false) .build(); try (ReplicationGroup shards = createGroup(1, settings)) { shards.startAll(); diff --git a/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java index 920a3f2946884..131514eb019b3 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java @@ -10,7 +10,6 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; -import 
org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.mapper.MapperService; @@ -18,7 +17,6 @@ import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.replication.common.ReplicationType; -import org.junit.Before; import java.nio.file.Path; @@ -31,14 +29,6 @@ public class RemoteStorePeerRecoverySourceHandlerTests extends OpenSearchIndexLe .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "100ms") .build(); - @Before - public void setup() { - // Todo: Remove feature flag once remote store integration with segrep goes GA - FeatureFlags.initializeFeatureFlags( - Settings.builder().put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING.getKey(), "true").build() - ); - } - public void testReplicaShardRecoveryUptoLastFlushedCommit() throws Exception { final Path remoteDir = createTempDir(); final String indexMapping = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": {} }"; diff --git a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java index 67729bbd5f76a..e4dd32e5c6f70 100644 --- a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java @@ -23,6 +23,7 @@ import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.transport.TransportService; @@ -68,7 +69,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -123,6 +125,7 @@ public void testGetSegmentFiles() { checkpoint, Arrays.asList(testMetadata), mock(IndexShard.class), + (fileName, bytesRecovered) -> {}, mock(ActionListener.class) ); CapturingTransport.CapturedRequest[] requestList = transport.getCapturedRequestsAndClear(); @@ -151,6 +154,7 @@ public void testTransportTimeoutForGetSegmentFilesAction() { checkpoint, Arrays.asList(testMetadata), mock(IndexShard.class), + (fileName, bytesRecovered) -> {}, mock(ActionListener.class) ); CapturingTransport.CapturedRequest[] requestList = transport.getCapturedRequestsAndClear(); @@ -176,6 +180,7 @@ public void testGetSegmentFiles_CancelWhileRequestOpen() throws InterruptedExcep checkpoint, Arrays.asList(testMetadata), mock(IndexShard.class), + (fileName, bytesRecovered) -> {}, new ActionListener<>() { @Override public void onResponse(GetSegmentFilesResponse getSegmentFilesResponse) { diff --git a/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java index 9204f48ba5bdd..287962b158c79 100644 --- a/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java +++ 
b/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java @@ -90,7 +90,7 @@ public void testGetSegmentFiles() throws ExecutionException, InterruptedExceptio List filesToFetch = primaryShard.getSegmentMetadataMap().values().stream().collect(Collectors.toList()); final PlainActionFuture res = PlainActionFuture.newFuture(); replicationSource = new RemoteStoreReplicationSource(primaryShard); - replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, filesToFetch, replicaShard, res); + replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, filesToFetch, replicaShard, (fileName, bytesRecovered) -> {}, res); GetSegmentFilesResponse response = res.get(); assertEquals(response.files.size(), filesToFetch.size()); assertTrue(response.files.containsAll(filesToFetch)); @@ -104,7 +104,14 @@ public void testGetSegmentFilesAlreadyExists() throws IOException, InterruptedEx try { final PlainActionFuture res = PlainActionFuture.newFuture(); replicationSource = new RemoteStoreReplicationSource(primaryShard); - replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, filesToFetch, primaryShard, res); + replicationSource.getSegmentFiles( + REPLICATION_ID, + checkpoint, + filesToFetch, + primaryShard, + (fileName, bytesRecovered) -> {}, + res + ); res.get(); } catch (AssertionError | ExecutionException ex) { latch.countDown(); @@ -118,7 +125,14 @@ public void testGetSegmentFilesReturnEmptyResponse() throws ExecutionException, final ReplicationCheckpoint checkpoint = primaryShard.getLatestReplicationCheckpoint(); final PlainActionFuture res = PlainActionFuture.newFuture(); replicationSource = new RemoteStoreReplicationSource(primaryShard); - replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, Collections.emptyList(), primaryShard, res); + replicationSource.getSegmentFiles( + REPLICATION_ID, + checkpoint, + Collections.emptyList(), + primaryShard, + (fileName, bytesRecovered) -> {}, + res + ); GetSegmentFilesResponse response = res.get(); assert (response.files.isEmpty()); } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java index dfdb0543daf2a..d586767290797 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceHandlerTests.java @@ -180,6 +180,7 @@ public void onFailure(Exception e) { assertEquals(e.getClass(), OpenSearchException.class); } }); + copyState.decRef(); } public void testReplicationAlreadyRunning() throws IOException { diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java index f2e0baf28ecd1..8f84053f2618e 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java @@ -29,6 +29,7 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.CopyStateTests; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.IndexSettingsModule; import 
org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; @@ -102,7 +103,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 69ec06d27bc61..4fcb9399aa200 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -9,6 +9,7 @@ package org.opensearch.indices.replication; import org.apache.lucene.store.AlreadyClosedException; +import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.cluster.ClusterState; @@ -37,6 +38,7 @@ import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -50,8 +52,9 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; -import static org.junit.Assert.assertEquals; +import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.atLeastOnce; @@ -121,7 +124,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); @@ -212,6 +216,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { Assert.fail("Should not be called"); @@ -245,6 +250,7 @@ public void testAlreadyOnNewCheckpoint() { SegmentReplicationTargetService spy = spy(sut); spy.onNewCheckpoint(replicaShard.getLatestReplicationCheckpoint(), replicaShard); verify(spy, times(0)).startReplication(any(), any(), any()); + verify(spy, times(1)).updateVisibleCheckpoint(NO_OPS_PERFORMED, replicaShard); } @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8928") @@ -277,6 +283,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); @@ -334,6 +341,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { Assert.fail("Unreachable"); @@ -555,7 +563,7 @@ public void testForceSegmentSyncHandlerWithFailure() throws Exception { ).txGet(); }); Throwable nestedException = 
finalizeException.getCause().getCause(); - assertTrue(nestedException instanceof IOException); + assertNotNull(ExceptionsHelper.unwrap(finalizeException, IOException.class)); assertTrue(nestedException.getMessage().contains("dummy failure")); } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java index 1192796566dcf..3804230942430 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java @@ -40,6 +40,7 @@ import org.opensearch.index.store.StoreTests; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationFailedException; +import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.DummyShardLock; import org.opensearch.test.IndexSettingsModule; @@ -53,6 +54,7 @@ import java.util.List; import java.util.Map; import java.util.Random; +import java.util.function.BiConsumer; import org.mockito.Mockito; @@ -131,10 +133,12 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { assertEquals(1, filesToFetch.size()); assert (filesToFetch.contains(SEGMENT_FILE)); + filesToFetch.forEach(storeFileMetadata -> fileProgressTracker.accept(storeFileMetadata.name(), storeFileMetadata.length())); listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); } }; @@ -149,6 +153,19 @@ public void getSegmentFiles( public void onResponse(Void replicationResponse) { try { verify(spyIndexShard, times(1)).finalizeReplication(any()); + assertEquals( + 1, + segrepTarget.state() + .getIndex() + .fileDetails() + .stream() + .filter(ReplicationLuceneIndex.FileMetadata::fullyRecovered) + .count() + ); + assertEquals( + 0, + segrepTarget.state().getIndex().fileDetails().stream().filter(file -> file.fullyRecovered() == false).count() + ); segrepTarget.markAsDone(); } catch (IOException ex) { Assert.fail(); @@ -182,6 +199,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); @@ -200,6 +218,15 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { + assertEquals( + 0, + segrepTarget.state() + .getIndex() + .fileDetails() + .stream() + .filter(ReplicationLuceneIndex.FileMetadata::fullyRecovered) + .count() + ); assertEquals(exception, e.getCause().getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } @@ -225,6 +252,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onFailure(exception); @@ -243,13 +271,22 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { + assertEquals( + 0, + segrepTarget.state() + .getIndex() + .fileDetails() + .stream() + .filter(ReplicationLuceneIndex.FileMetadata::fullyRecovered) + .count() + ); assertEquals(exception, e.getCause().getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); 
} }); } - public void testFailure_finalizeReplication_IOException() throws IOException { + public void testFailure_finalizeReplication_NonCorruptionException() throws IOException { IOException exception = new IOException("dummy failure"); SegmentReplicationSource segrepSource = new TestReplicationSource() { @@ -268,6 +305,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); @@ -288,6 +326,7 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { + assertEquals(ReplicationFailedException.class, e.getClass()); assertEquals(exception, e.getCause()); segrepTarget.fail(new ReplicationFailedException(e), false); } @@ -313,6 +352,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); @@ -357,6 +397,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); @@ -375,6 +416,15 @@ public void onResponse(Void replicationResponse) { @Override public void onFailure(Exception e) { + assertEquals( + 0, + segrepTarget.state() + .getIndex() + .fileDetails() + .stream() + .filter(ReplicationLuceneIndex.FileMetadata::fullyRecovered) + .count() + ); assertTrue(e instanceof OpenSearchCorruptionException); assertTrue(e.getMessage().contains("has local copies of segments that differ from the primary")); segrepTarget.fail(new ReplicationFailedException(e), false); @@ -409,6 +459,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { listener.onResponse(new GetSegmentFilesResponse(filesToFetch)); diff --git a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java index 401700c2b2ab9..2cf006176022d 100644 --- a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java @@ -25,6 +25,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; @@ -61,7 +62,8 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/ingest/ConfigurationUtilsTests.java b/server/src/test/java/org/opensearch/ingest/ConfigurationUtilsTests.java index cf67c8af139fb..e18e5887c949e 100644 --- a/server/src/test/java/org/opensearch/ingest/ConfigurationUtilsTests.java +++ 
b/server/src/test/java/org/opensearch/ingest/ConfigurationUtilsTests.java @@ -179,6 +179,16 @@ public void testReadProcessors() throws Exception { assertThat(e2.getMetadata("opensearch.processor_tag"), equalTo(Collections.singletonList("my_second_unknown"))); assertThat(e2.getMetadata("opensearch.processor_type"), equalTo(Collections.singletonList("second_unknown_processor"))); assertThat(e2.getMetadata("opensearch.property_name"), is(nullValue())); + + // test null config + List<Map<String, Object>> config3 = new ArrayList<>(); + config3.add(Collections.singletonMap("null_processor", null)); + + OpenSearchParseException ex = expectThrows( + OpenSearchParseException.class, + () -> ConfigurationUtils.readProcessorConfigs(config3, scriptService, registry) + ); + assertEquals(ex.getMessage(), "the config of processor [null_processor] cannot be null"); } public void testReadProcessorNullDescription() throws Exception { @@ -235,6 +245,12 @@ public void testReadProcessorFromObjectOrMap() throws Exception { () -> ConfigurationUtils.readProcessor(registry, scriptService, "unknown_processor", invalidConfig) ); assertThat(ex.getMessage(), equalTo("property isn't a map, but of type [" + invalidConfig.getClass().getName() + "]")); + + ex = expectThrows( + OpenSearchParseException.class, + () -> ConfigurationUtils.readProcessor(registry, scriptService, "null_processor", null) + ); + assertEquals(ex.getMessage(), "expect the config of processor [null_processor] to be map, but is null"); } public void testNoScriptCompilation() { diff --git a/server/src/test/java/org/opensearch/monitor/fs/DeviceStatsTests.java b/server/src/test/java/org/opensearch/monitor/fs/DeviceStatsTests.java index 0fd039b84e887..0059f8e215f2e 100644 --- a/server/src/test/java/org/opensearch/monitor/fs/DeviceStatsTests.java +++ b/server/src/test/java/org/opensearch/monitor/fs/DeviceStatsTests.java @@ -46,7 +46,10 @@ public void testDeviceStats() { final int sectorsRead = randomIntBetween(8 * readsCompleted, 16 * readsCompleted); final int writesCompleted = randomIntBetween(1, 1 << 16); final int sectorsWritten = randomIntBetween(8 * writesCompleted, 16 * writesCompleted); - + final int readTime = randomIntBetween(1, 1 << 16); + final int writeTime = randomIntBetween(1, 1 << 16); + final int queueSize = randomIntBetween(1, 1 << 16); + final int ioTime = randomIntBetween(1, 1 << 16); FsInfo.DeviceStats previous = new FsInfo.DeviceStats( majorDeviceNumber, minorDeviceNumber, @@ -55,6 +58,10 @@ public void testDeviceStats() { sectorsRead, writesCompleted, sectorsWritten, + readTime, + writeTime, + queueSize, + ioTime, null ); FsInfo.DeviceStats current = new FsInfo.DeviceStats( @@ -65,6 +72,10 @@ public void testDeviceStats() { sectorsRead + 16384, writesCompleted + 2048, sectorsWritten + 32768, + readTime + 500, + writeTime + 100, + queueSize + 20, + ioTime + 8192, previous ); assertThat(current.operations(), equalTo(1024L + 2048L)); @@ -72,6 +83,10 @@ public void testDeviceStats() { assertThat(current.writeOperations(), equalTo(2048L)); assertThat(current.readKilobytes(), equalTo(16384L / 2)); assertThat(current.writeKilobytes(), equalTo(32768L / 2)); + assertEquals(500, current.readTime()); + assertEquals(100, current.writeTime()); + assertEquals(20, current.queueSize()); + assertEquals(8192, current.ioTimeInMillis()); } } diff --git a/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java b/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java index 686a624d988d7..59a888c665be7 100644 ---
a/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java +++ b/server/src/test/java/org/opensearch/monitor/fs/FsProbeTests.java @@ -91,6 +91,14 @@ public void testFsInfo() throws IOException { assertThat(deviceStats.previousWritesCompleted, equalTo(-1L)); assertThat(deviceStats.currentSectorsWritten, greaterThanOrEqualTo(0L)); assertThat(deviceStats.previousSectorsWritten, equalTo(-1L)); + assertThat(deviceStats.currentReadTime, greaterThanOrEqualTo(0L)); + assertThat(deviceStats.previousReadTime, greaterThanOrEqualTo(-1L)); + assertThat(deviceStats.currentWriteTime, greaterThanOrEqualTo(0L)); + assertThat(deviceStats.previousWriteTime, greaterThanOrEqualTo(-1L)); + assertThat(deviceStats.currentQueueSize, greaterThanOrEqualTo(0L)); + assertThat(deviceStats.previousQueueSize, greaterThanOrEqualTo(-1L)); + assertThat(deviceStats.currentIOTime, greaterThanOrEqualTo(0L)); + assertThat(deviceStats.previousIOTime, greaterThanOrEqualTo(-1L)); } } else { assertNull(stats.getIoStats()); @@ -243,6 +251,16 @@ List readProcDiskStats() throws IOException { assertThat(first.devicesStats[0].previousWritesCompleted, equalTo(-1L)); assertThat(first.devicesStats[0].currentSectorsWritten, equalTo(118857776L)); assertThat(first.devicesStats[0].previousSectorsWritten, equalTo(-1L)); + + assertEquals(33457, first.devicesStats[0].currentReadTime); + assertEquals(-1, first.devicesStats[0].previousReadTime); + assertEquals(18730966, first.devicesStats[0].currentWriteTime); + assertEquals(-1, first.devicesStats[0].previousWriteTime); + assertEquals(18767169, first.devicesStats[0].currentQueueSize); + assertEquals(-1, first.devicesStats[0].previousQueueSize); + assertEquals(1918440, first.devicesStats[0].currentIOTime); + assertEquals(-1, first.devicesStats[0].previousIOTime); + assertThat(first.devicesStats[1].majorDeviceNumber, equalTo(253)); assertThat(first.devicesStats[1].minorDeviceNumber, equalTo(2)); assertThat(first.devicesStats[1].deviceName, equalTo("dm-2")); @@ -255,6 +273,15 @@ List readProcDiskStats() throws IOException { assertThat(first.devicesStats[1].currentSectorsWritten, equalTo(64126096L)); assertThat(first.devicesStats[1].previousSectorsWritten, equalTo(-1L)); + assertEquals(49312, first.devicesStats[1].currentReadTime); + assertEquals(-1, first.devicesStats[1].previousReadTime); + assertEquals(33730596, first.devicesStats[1].currentWriteTime); + assertEquals(-1, first.devicesStats[1].previousWriteTime); + assertEquals(33781827, first.devicesStats[1].currentQueueSize); + assertEquals(-1, first.devicesStats[1].previousQueueSize); + assertEquals(1058193, first.devicesStats[1].currentIOTime); + assertEquals(-1, first.devicesStats[1].previousIOTime); + diskStats.set( Arrays.asList( " 259 0 nvme0n1 336870 0 7928397 82876 10264393 0 182986405 52451610 0 2971042 52536492", @@ -281,6 +308,16 @@ List readProcDiskStats() throws IOException { assertThat(second.devicesStats[0].previousWritesCompleted, equalTo(8398869L)); assertThat(second.devicesStats[0].currentSectorsWritten, equalTo(118857776L)); assertThat(second.devicesStats[0].previousSectorsWritten, equalTo(118857776L)); + + assertEquals(33464, second.devicesStats[0].currentReadTime); + assertEquals(33457, second.devicesStats[0].previousReadTime); + assertEquals(18730966, second.devicesStats[0].currentWriteTime); + assertEquals(18730966, second.devicesStats[0].previousWriteTime); + assertEquals(18767176, second.devicesStats[0].currentQueueSize); + assertEquals(18767169, second.devicesStats[0].previousQueueSize); + assertEquals(1918444, 
second.devicesStats[0].currentIOTime); + assertEquals(1918440, second.devicesStats[0].previousIOTime); + assertThat(second.devicesStats[1].majorDeviceNumber, equalTo(253)); assertThat(second.devicesStats[1].minorDeviceNumber, equalTo(2)); assertThat(second.devicesStats[1].deviceName, equalTo("dm-2")); @@ -293,11 +330,25 @@ List readProcDiskStats() throws IOException { assertThat(second.devicesStats[1].currentSectorsWritten, equalTo(64128568L)); assertThat(second.devicesStats[1].previousSectorsWritten, equalTo(64126096L)); + assertEquals(49369, second.devicesStats[1].currentReadTime); + assertEquals(49312, second.devicesStats[1].previousReadTime); + assertEquals(33730766, second.devicesStats[1].currentWriteTime); + assertEquals(33730596, second.devicesStats[1].previousWriteTime); + assertEquals(33781827, first.devicesStats[1].currentQueueSize); + assertEquals(-1L, first.devicesStats[1].previousQueueSize); + assertEquals(1058193, first.devicesStats[1].currentIOTime); + assertEquals(-1L, first.devicesStats[1].previousIOTime); + assertThat(second.totalOperations, equalTo(575L)); assertThat(second.totalReadOperations, equalTo(261L)); assertThat(second.totalWriteOperations, equalTo(314L)); assertThat(second.totalReadKilobytes, equalTo(2392L)); assertThat(second.totalWriteKilobytes, equalTo(1236L)); + + assertEquals(64, second.totalReadTime); + assertEquals(170, second.totalWriteTime); + assertEquals(236, second.totalQueueSize); + assertEquals(158, second.totalIOTimeInMillis); } public void testAdjustForHugeFilesystems() throws Exception { diff --git a/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java b/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java new file mode 100644 index 0000000000000..c4ba271d27ae9 --- /dev/null +++ b/server/src/test/java/org/opensearch/node/RemoteStoreNodeAttributeTests.java @@ -0,0 +1,151 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node; + +import org.opensearch.Version; +import org.opensearch.cluster.metadata.CryptoMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; +import org.opensearch.test.OpenSearchTestCase; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.Locale; +import java.util.Map; + +import static java.util.Collections.emptySet; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; + +public class RemoteStoreNodeAttributeTests extends OpenSearchTestCase { + + private static final String KEY_ARN = "arn:aws:kms:us-east-1:123456789:key/6e9aa906-2cc3-4924-8ded-f385c78d9dcf"; + private static final String REGION = "us-east-1"; + + public void testCryptoMetadata() throws UnknownHostException { + String repoName = "remote-store-A"; + String repoTypeSettingKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, repoName); + String repoSettingsKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, repoName); + String repoCryptoMetadataKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT, repoName); + String repoCryptoMetadataSettingsKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX, repoName); + Map<String, String> attr = Map.of( + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + repoTypeSettingKey, + "s3", + repoSettingsKey, + "abc", + repoSettingsKey + "base_path", + "xyz", + repoCryptoMetadataKey + ".key_provider_name", + "store-test", + repoCryptoMetadataKey + ".key_provider_type", + "aws-kms", + repoCryptoMetadataSettingsKey + ".region", + REGION, + repoCryptoMetadataSettingsKey + ".key_arn", + KEY_ARN + ); + DiscoveryNode node = new DiscoveryNode( + "C", + new TransportAddress(InetAddress.getByName("localhost"), 9876), + attr, + emptySet(), + Version.CURRENT + ); + + RemoteStoreNodeAttribute remoteStoreNodeAttribute = new RemoteStoreNodeAttribute(node); + assertEquals(remoteStoreNodeAttribute.getRepositoriesMetadata().repositories().size(), 1); + RepositoryMetadata repositoryMetadata = remoteStoreNodeAttribute.getRepositoriesMetadata().repositories().get(0); + Settings.Builder settings = Settings.builder(); + settings.put("region", REGION); + settings.put("key_arn", KEY_ARN); + CryptoMetadata cryptoMetadata = new CryptoMetadata("store-test", "aws-kms", settings.build()); + assertEquals(cryptoMetadata, repositoryMetadata.cryptoMetadata()); + } + + public void testInvalidCryptoMetadata() throws UnknownHostException { + String repoName = "remote-store-A"; + String repoTypeSettingKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, repoName); + String repoSettingsKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, repoName); + String repoCryptoMetadataKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_CRYPTO_ATTRIBUTE_KEY_FORMAT, repoName); + String repoCryptoMetadataSettingsKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_CRYPTO_SETTINGS_PREFIX, repoName); + Map<String, String> attr = Map.of( + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + repoTypeSettingKey, + "s3", + repoSettingsKey, + "abc", + repoSettingsKey + "base_path", + "xyz", + repoCryptoMetadataSettingsKey + ".region", + REGION, + repoCryptoMetadataSettingsKey + ".key_arn", + KEY_ARN + ); + DiscoveryNode node = new DiscoveryNode( + "C", + new TransportAddress(InetAddress.getByName("localhost"), 9876), + attr, + emptySet(), + Version.CURRENT + ); + + assertThrows(IllegalStateException.class, () -> new RemoteStoreNodeAttribute(node)); + } + + public void testNoCryptoMetadata() throws UnknownHostException { + String repoName = "remote-store-A"; + String repoTypeSettingKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, repoName); + String repoSettingsKey = String.format(Locale.ROOT, REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, repoName); + Map<String, String> attr = Map.of( + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + repoName, + repoTypeSettingKey, + "s3", + repoSettingsKey, + "abc", + repoSettingsKey + "base_path", + "xyz" + ); + DiscoveryNode node = new DiscoveryNode( + "C", + new TransportAddress(InetAddress.getByName("localhost"), 9876), + attr, + emptySet(), + Version.CURRENT + ); + + RemoteStoreNodeAttribute remoteStoreNodeAttribute = new RemoteStoreNodeAttribute(node); + assertEquals(remoteStoreNodeAttribute.getRepositoriesMetadata().repositories().size(), 1); + RepositoryMetadata repositoryMetadata = remoteStoreNodeAttribute.getRepositoriesMetadata().repositories().get(0); + assertNull(repositoryMetadata.cryptoMetadata()); + } +} diff --git a/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java b/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java new file mode 100644 index 0000000000000..b2fa884afab69 --- /dev/null +++ b/server/src/test/java/org/opensearch/node/ResourceUsageCollectorServiceTests.java @@ -0,0 +1,190 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.node; + +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.node.resource.tracker.NodeResourceUsageTracker; +import org.opensearch.node.resource.tracker.ResourceTrackerSettings; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.hamcrest.Matchers.greaterThan; + +/** + * Tests for ResourceUsageCollectorService where we test collect method, get method and whether schedulers + * are working as expected + */ +public class ResourceUsageCollectorServiceTests extends OpenSearchSingleNodeTestCase { + + private ClusterService clusterService; + private ResourceUsageCollectorService collector; + private ThreadPool threadpool; + NodeResourceUsageTracker tracker; + + @Before + public void setUp() throws Exception { + super.setUp(); + + threadpool = new TestThreadPool("resource_usage_collector_tests"); + + clusterService = createClusterService(threadpool); + + Settings settings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), new TimeValue(500, TimeUnit.MILLISECONDS)) + .build(); + tracker = new NodeResourceUsageTracker( + threadpool, + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + collector = new ResourceUsageCollectorService(tracker, clusterService, threadpool); + tracker.start(); + collector.start(); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + threadpool.shutdownNow(); + clusterService.close(); + collector.stop(); + tracker.stop(); + collector.close(); + tracker.close(); + } + + public void testResourceUsageStats() { + collector.collectNodeResourceUsageStats("node1", System.currentTimeMillis(), 97, 99); + Map<String, NodeResourceUsageStats> nodeStats = collector.getAllNodeStatistics(); + assertTrue(nodeStats.containsKey("node1")); + assertEquals(99.0, nodeStats.get("node1").cpuUtilizationPercent, 0.0); + assertEquals(97.0, nodeStats.get("node1").memoryUtilizationPercent, 0.0); + + Optional<NodeResourceUsageStats> nodeResourceUsageStatsOptional = collector.getNodeStatistics("node1"); + + assertNotNull(nodeResourceUsageStatsOptional.get()); + assertEquals(99.0, nodeResourceUsageStatsOptional.get().cpuUtilizationPercent, 0.0); + assertEquals(97.0, nodeResourceUsageStatsOptional.get().memoryUtilizationPercent, 0.0); + + nodeResourceUsageStatsOptional = collector.getNodeStatistics("node2"); + assertTrue(nodeResourceUsageStatsOptional.isEmpty()); + } + + public void testScheduler() throws Exception { + /** + * Wait for cluster state to be ready so that localNode().getId() is ready and we add the values to the map + */ + assertBusy(() -> assertTrue(collector.getNodeStatistics(clusterService.localNode().getId()).isPresent()), 1, TimeUnit.MINUTES); + assertTrue(collector.getNodeStatistics(clusterService.localNode().getId()).isPresent()); + /** + * Wait for memory utilization to be reported greater than 0 + */ + assertBusy( + () -> assertThat( + collector.getNodeStatistics(clusterService.localNode().getId()).get().getMemoryUtilizationPercent(), + greaterThan(0.0) + ), + 5, + TimeUnit.SECONDS + ); + assertTrue(collector.getNodeStatistics("Invalid").isEmpty()); + } + + /* + * Test that concurrently adding values and removing nodes does not cause exceptions + */ + public void testConcurrentAddingAndRemovingNodes() throws Exception { + String[] nodes = new String[] { "a", "b", "c", "d" }; + + final CountDownLatch latch = new CountDownLatch(5); + + Runnable f = () -> { + latch.countDown(); + try { + latch.await(); + } catch (InterruptedException e) { + fail("should not be interrupted"); + } + for (int i = 0; i < randomIntBetween(100, 200); i++) { + if (randomBoolean()) { + collector.removeNodeResourceUsageStats(randomFrom(nodes)); + } + collector.collectNodeResourceUsageStats( + randomFrom(nodes), + System.currentTimeMillis(), + randomIntBetween(1, 100), + randomIntBetween(1, 100) + ); + } + }; + + Thread t1 = new Thread(f); + Thread t2 = new Thread(f); + Thread t3 = new Thread(f); + Thread t4 = new Thread(f); + + t1.start(); + t2.start(); + t3.start(); + t4.start(); + latch.countDown(); + t1.join(); + t2.join(); + t3.join(); + t4.join(); + + final Map<String, NodeResourceUsageStats> nodeStats = collector.getAllNodeStatistics(); + for (String nodeId : nodes) { + if (nodeStats.containsKey(nodeId)) { + assertThat(nodeStats.get(nodeId).memoryUtilizationPercent, greaterThan(0.0)); + assertThat(nodeStats.get(nodeId).cpuUtilizationPercent, greaterThan(0.0)); + } + } + } + + public void testNodeRemoval() { + collector.collectNodeResourceUsageStats("node1", System.currentTimeMillis(), randomIntBetween(1, 100), randomIntBetween(1, 100)); + collector.collectNodeResourceUsageStats("node2", System.currentTimeMillis(), randomIntBetween(1, 100), randomIntBetween(1, 100)); + + ClusterState previousState = ClusterState.builder(new ClusterName("cluster")) + .nodes( + DiscoveryNodes.builder() + .add(DiscoveryNode.createLocal(Settings.EMPTY, new TransportAddress(TransportAddress.META_ADDRESS, 9200), "node1")) + .add(DiscoveryNode.createLocal(Settings.EMPTY, new TransportAddress(TransportAddress.META_ADDRESS, 9201), "node2")) + ) + .build(); + ClusterState newState = ClusterState.builder(previousState) + .nodes(DiscoveryNodes.builder(previousState.nodes()).remove("node2")) + .build(); + ClusterChangedEvent event = new ClusterChangedEvent("test", newState, previousState); + + collector.clusterChanged(event); + final Map<String, NodeResourceUsageStats> nodeStats = collector.getAllNodeStatistics(); + assertTrue(nodeStats.containsKey("node1")); + assertFalse(nodeStats.containsKey("node2")); + } +} diff --git a/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java b/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java new file mode 100644 index 0000000000000..374c993a264d4 --- /dev/null +++ b/server/src/test/java/org/opensearch/node/resource/tracker/AverageUsageTrackerTests.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.concurrent.TimeUnit; + +/** + * Tests to validate AverageMemoryUsageTracker and AverageCpuUsageTracker implementation + */ +public class AverageUsageTrackerTests extends OpenSearchTestCase { + ThreadPool threadPool; + AverageMemoryUsageTracker averageMemoryUsageTracker; + AverageCpuUsageTracker averageCpuUsageTracker; + + @Before + public void setup() { + threadPool = new TestThreadPool(getClass().getName()); + averageMemoryUsageTracker = new AverageMemoryUsageTracker( + threadPool, + new TimeValue(500, TimeUnit.MILLISECONDS), + new TimeValue(1000, TimeUnit.MILLISECONDS) + ); + averageCpuUsageTracker = new AverageCpuUsageTracker( + threadPool, + new TimeValue(500, TimeUnit.MILLISECONDS), + new TimeValue(1000, TimeUnit.MILLISECONDS) + ); + } + + @After + public void cleanup() { + ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); + } + + public void testBasicUsage() { + + assertAverageUsageStats(averageMemoryUsageTracker); + assertAverageUsageStats(averageCpuUsageTracker); + } + + public void testUpdateWindowSize() { + assertUpdateWindowSize(averageMemoryUsageTracker); + assertUpdateWindowSize(averageCpuUsageTracker); + } + + private void assertAverageUsageStats(AbstractAverageUsageTracker usageTracker) { + usageTracker.recordUsage(1); + assertFalse(usageTracker.isReady()); + usageTracker.recordUsage(2); + assertTrue(usageTracker.isReady()); + assertEquals(2, usageTracker.getWindowSize()); + assertEquals(1.5, usageTracker.getAverage(), 0.0); + usageTracker.recordUsage(5); + // ( 2 + 5 ) / 2 = 3.5 + assertEquals(3.5, usageTracker.getAverage(), 0.0); + } + + private void assertUpdateWindowSize(AbstractAverageUsageTracker usageTracker) { + usageTracker.recordUsage(1); + usageTracker.recordUsage(2); + + assertEquals(2, usageTracker.getWindowSize()); + assertEquals(1.5, usageTracker.getAverage(), 0.0); + usageTracker.recordUsage(5); + // ( 2 + 5 ) / 2 = 3.5 + assertEquals(3.5, usageTracker.getAverage(), 0.0); + + usageTracker.setWindowSize(new TimeValue(2000, TimeUnit.MILLISECONDS)); + assertEquals(0, usageTracker.getWindowSize()); + assertEquals(0.0, usageTracker.getAverage(), 0.0); + // verify 2000/500 = 4 is the window size and average is calculated on window size of 4 + usageTracker.recordUsage(1); + usageTracker.recordUsage(2); + usageTracker.recordUsage(1); + assertFalse(usageTracker.isReady()); + usageTracker.recordUsage(2); + assertTrue(usageTracker.isReady()); + assertEquals(4, usageTracker.getWindowSize()); + // (1 + 2 + 1 + 2 ) / 4 = 1.5 + assertEquals(1.5, usageTracker.getAverage(), 0.0); + usageTracker.recordUsage(2); + assertTrue(usageTracker.isReady()); + // ( 2 + 1 + 2 + 2 ) / 4 = 1.75 + assertEquals(1.75, usageTracker.getAverage(), 0.0); + } +} diff --git a/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java b/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java new file mode 100644 index 0000000000000..1ce68b9f29062 --- /dev/null +++ b/server/src/test/java/org/opensearch/node/resource/tracker/NodeResourceUsageTrackerTests.java @@ -0,0 +1,96 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under 
the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.resource.tracker; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.util.concurrent.TimeUnit; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.greaterThan; + +/** + * Tests to assert resource usage trackers retrieving resource utilization averages + */ +public class NodeResourceUsageTrackerTests extends OpenSearchSingleNodeTestCase { + ThreadPool threadPool; + + @Before + public void setup() { + threadPool = new TestThreadPool(getClass().getName()); + } + + @After + public void cleanup() { + ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } + + public void testStats() throws Exception { + Settings settings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), new TimeValue(500, TimeUnit.MILLISECONDS)) + .build(); + NodeResourceUsageTracker tracker = new NodeResourceUsageTracker( + threadPool, + settings, + new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + tracker.start(); + /** + * Asserting memory utilization to be greater than 0 + * cpu percent used is mostly 0, so skipping assertion for that + */ + assertBusy(() -> assertThat(tracker.getMemoryUtilizationPercent(), greaterThan(0.0)), 5, TimeUnit.SECONDS); + tracker.stop(); + tracker.close(); + } + + public void testUpdateSettings() { + NodeResourceUsageTracker tracker = new NodeResourceUsageTracker( + threadPool, + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + + assertEquals(tracker.getResourceTrackerSettings().getCpuWindowDuration().getSeconds(), 30); + assertEquals(tracker.getResourceTrackerSettings().getMemoryWindowDuration().getSeconds(), 30); + + Settings settings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), "10s") + .build(); + ClusterUpdateSettingsResponse response = client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).get(); + assertEquals( + "10s", + response.getPersistentSettings().get(ResourceTrackerSettings.GLOBAL_CPU_USAGE_AC_WINDOW_DURATION_SETTING.getKey()) + ); + + Settings jvmsettings = Settings.builder() + .put(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey(), "5s") + .build(); + response = client().admin().cluster().prepareUpdateSettings().setPersistentSettings(jvmsettings).get(); + assertEquals( + "5s", + response.getPersistentSettings().get(ResourceTrackerSettings.GLOBAL_JVM_USAGE_AC_WINDOW_DURATION_SETTING.getKey()) + ); + } +} diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index b1274d61b4608..1cc4e8aa7ebee 100644 --- 
a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -34,21 +34,31 @@ import org.apache.lucene.index.IndexCommit; import org.opensearch.Version; +import org.opensearch.action.admin.cluster.crypto.CryptoSettings; import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.ack.ClusterStateUpdateResponse; +import org.opensearch.cluster.metadata.CryptoMetadata; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.crypto.CryptoHandler; +import org.opensearch.common.crypto.DecryptedRangedStreamProvider; +import org.opensearch.common.crypto.EncryptedHeaderContentSupplier; +import org.opensearch.common.crypto.MasterKeyProvider; +import org.opensearch.common.io.InputStreamContainer; import org.opensearch.common.lifecycle.Lifecycle; import org.opensearch.common.lifecycle.LifecycleListener; import org.opensearch.common.settings.Settings; @@ -63,32 +73,51 @@ import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.plugins.CryptoPlugin; import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportService; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.Function; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class RepositoriesServiceTests extends OpenSearchTestCase { private RepositoriesService repositoriesService; + private final String kpTypeA = "kp-type-a"; + private final String kpTypeB = "kp-type-b"; @Override public void setUp() throws Exception { super.setUp(); + ThreadPool threadPool = mock(ThreadPool.class); + final ClusterApplierService clusterApplierService = mock(ClusterApplierService.class); + 
when(clusterApplierService.threadPool()).thenReturn(threadPool); + final ClusterService clusterService = mock(ClusterService.class); + repositoriesService = createRepositoriesServiceWithMockedClusterService(clusterService); + } + + private RepositoriesService createRepositoriesServiceWithMockedClusterService(ClusterService clusterService) { ThreadPool threadPool = mock(ThreadPool.class); final TransportService transportService = new TransportService( Settings.EMPTY, @@ -97,11 +126,11 @@ public void setUp() throws Exception { TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> DiscoveryNode.createLocal(Settings.EMPTY, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); final ClusterApplierService clusterApplierService = mock(ClusterApplierService.class); when(clusterApplierService.threadPool()).thenReturn(threadPool); - final ClusterService clusterService = mock(ClusterService.class); when(clusterService.getClusterApplierService()).thenReturn(clusterApplierService); Map typesRegistry = Map.of( TestRepository.TYPE, @@ -111,15 +140,24 @@ public void setUp() throws Exception { MeteredRepositoryTypeB.TYPE, metadata -> new MeteredRepositoryTypeB(metadata, clusterService) ); - repositoriesService = new RepositoriesService( + + DiscoveryNodes nodes = mock(DiscoveryNodes.class); + when(nodes.getMinNodeVersion()).thenReturn(Version.V_2_9_0); + ClusterState currentClusterState = mock(ClusterState.class); + when(currentClusterState.getNodes()).thenReturn(nodes); + when(clusterService.state()).thenReturn(currentClusterState); + + RepositoriesService repositoriesService = new RepositoriesService( Settings.EMPTY, - mock(ClusterService.class), + clusterService, transportService, typesRegistry, typesRegistry, threadPool ); + repositoriesService.start(); + return repositoriesService; } public void testRegisterInternalRepository() { @@ -164,6 +202,13 @@ public void testRegisterRejectsInvalidRepositoryNames() { } } + public void testUpdateOrRegisterRejectsForSystemRepository() { + String repoName = "name"; + PutRepositoryRequest request = new PutRepositoryRequest(repoName); + request.settings(Settings.builder().put(SYSTEM_REPOSITORY_SETTING.getKey(), true).build()); + expectThrows(RepositoryException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); + } + public void testRepositoriesStatsCanHaveTheSameNameAndDifferentTypeOverTime() { String repoName = "name"; expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(repoName)); @@ -174,20 +219,265 @@ public void testRepositoriesStatsCanHaveTheSameNameAndDifferentTypeOverTime() { assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", emptyState(), clusterStateWithRepoTypeA)); - assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); + assertThat(repositoriesService.repositoriesStats().size(), equalTo(0)); ClusterState clusterStateWithRepoTypeB = createClusterStateWithRepo(repoName, MeteredRepositoryTypeB.TYPE); repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeB, emptyState())); List repositoriesStats = repositoriesService.repositoriesStats(); - assertThat(repositoriesStats.size(), equalTo(2)); + assertThat(repositoriesStats.size(), equalTo(1)); RepositoryStatsSnapshot repositoryStatsTypeA = repositoriesStats.get(0); - 
assertThat(repositoryStatsTypeA.getRepositoryInfo().type, equalTo(MeteredRepositoryTypeA.TYPE)); - assertThat(repositoryStatsTypeA.getRepositoryStats(), equalTo(MeteredRepositoryTypeA.STATS)); + assertThat(repositoryStatsTypeA.getRepositoryInfo().type, equalTo(MeteredRepositoryTypeB.TYPE)); + assertThat(repositoryStatsTypeA.getRepositoryStats(), equalTo(MeteredRepositoryTypeB.STATS)); - RepositoryStatsSnapshot repositoryStatsTypeB = repositoriesStats.get(1); - assertThat(repositoryStatsTypeB.getRepositoryInfo().type, equalTo(MeteredRepositoryTypeB.TYPE)); - assertThat(repositoryStatsTypeB.getRepositoryStats(), equalTo(MeteredRepositoryTypeB.STATS)); + } + + public void testWithSameKeyProviderNames() { + String keyProviderName = "kp-name"; + ClusterState clusterStateWithRepoTypeA = createClusterStateWithKeyProvider( + "repoName", + MeteredRepositoryTypeA.TYPE, + keyProviderName, + kpTypeA + ); + + repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeA, emptyState())); + assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); + MeteredRepositoryTypeA repository = (MeteredRepositoryTypeA) repositoriesService.repository("repoName"); + assertNotNull(repository); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + + ClusterState clusterStateWithRepoTypeB = createClusterStateWithKeyProvider( + "repoName", + MeteredRepositoryTypeB.TYPE, + keyProviderName, + kpTypeA + ); + repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeB, emptyState())); + assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); + MeteredRepositoryTypeB repositoryB = (MeteredRepositoryTypeB) repositoriesService.repository("repoName"); + assertNotNull(repositoryB); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + } + + public void testCryptoManagersUnchangedWithSameCryptoMetadata() { + String keyProviderName = "kp-name"; + ClusterState clusterStateWithRepoTypeA = createClusterStateWithKeyProvider( + "repoName", + MeteredRepositoryTypeA.TYPE, + keyProviderName, + kpTypeA + ); + repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeA, emptyState())); + assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); + MeteredRepositoryTypeA repository = (MeteredRepositoryTypeA) repositoriesService.repository("repoName"); + assertNotNull(repository); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + + repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeA, emptyState())); + assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); + repository = (MeteredRepositoryTypeA) repositoriesService.repository("repoName"); + assertNotNull(repository); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + } + + public void testRepositoryUpdateWithDifferentCryptoMetadata() { + String keyProviderName = "kp-name"; + + ClusterState clusterStateWithRepoTypeA = createClusterStateWithKeyProvider( + "repoName", + MeteredRepositoryTypeA.TYPE, + keyProviderName, + kpTypeA + ); + ClusterService clusterService = mock(ClusterService.class); + + PutRepositoryRequest request = new PutRepositoryRequest("repoName"); + request.type(MeteredRepositoryTypeA.TYPE); + request.settings(Settings.EMPTY); + + doAnswer((invocation) -> { + 
AckedClusterStateUpdateTask task = (AckedClusterStateUpdateTask< + ClusterStateUpdateResponse>) invocation.getArguments()[1]; + task.execute(clusterStateWithRepoTypeA); + return null; + }).when(clusterService).submitStateUpdateTask(any(), any()); + + RepositoriesService repositoriesService = createRepositoriesServiceWithMockedClusterService(clusterService); + repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeA, emptyState())); + assertThat(repositoriesService.repositoriesStats().size(), equalTo(1)); + MeteredRepositoryTypeA repository = (MeteredRepositoryTypeA) repositoriesService.repository("repoName"); + assertNotNull(repository); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + + expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); + + CryptoSettings cryptoSettings = new CryptoSettings(keyProviderName); + cryptoSettings.keyProviderType(kpTypeA); + cryptoSettings.settings(Settings.builder().put("key-1", "val-1")); + request.cryptoSettings(cryptoSettings); + expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); + + cryptoSettings.settings(Settings.builder()); + cryptoSettings.keyProviderName("random"); + expectThrows(IllegalArgumentException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); + + cryptoSettings.keyProviderName(keyProviderName); + + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + repositoriesService.registerOrUpdateRepository(request, null); + } + + public void testCryptoManagerClusterStateChanges() { + + ClusterService clusterService = mock(ClusterService.class); + AtomicBoolean verified = new AtomicBoolean(); + List repositoryMetadata = new ArrayList<>(); + + String keyProviderName = "kp-name-1"; + String repoName = "repoName"; + String keyProviderType = kpTypeA; + Settings.Builder settings = Settings.builder(); + PutRepositoryRequest request = createPutRepositoryEncryptedRequest( + repoName, + MeteredRepositoryTypeA.TYPE, + keyProviderName, + settings, + keyProviderType + ); + verified.set(false); + RepositoriesService repositoriesService = createRepositoriesServiceAndMockCryptoClusterState( + clusterService, + repoName, + keyProviderName, + keyProviderType, + settings.build(), + verified, + repositoryMetadata + ); + repositoriesService.registerOrUpdateRepository(request, null); + MeteredRepositoryTypeA repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + assertTrue(verified.get()); + + // No change + keyProviderType = kpTypeA; + settings = Settings.builder(); + request = createPutRepositoryEncryptedRequest(repoName, MeteredRepositoryTypeA.TYPE, keyProviderName, settings, keyProviderType); + verified.set(false); + repositoriesService = createRepositoriesServiceAndMockCryptoClusterState( + clusterService, + repoName, + keyProviderName, + keyProviderType, + settings.build(), + verified, + repositoryMetadata + ); + repositoriesService.registerOrUpdateRepository(request, null); + + repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + assertTrue(verified.get()); + + // Same crypto client in new repo + repoName = "repoName-2"; + keyProviderType = kpTypeA; + settings 
= Settings.builder(); + request = createPutRepositoryEncryptedRequest(repoName, MeteredRepositoryTypeA.TYPE, keyProviderName, settings, keyProviderType); + verified.set(false); + repositoriesService = createRepositoriesServiceAndMockCryptoClusterState( + clusterService, + repoName, + keyProviderName, + keyProviderType, + settings.build(), + verified, + repositoryMetadata + ); + repositoriesService.registerOrUpdateRepository(request, null); + repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeA, repository.cryptoHandler.kpType); + assertTrue(verified.get()); + + // Different crypto client in new repo + repoName = "repoName-3"; + keyProviderType = kpTypeB; + settings = Settings.builder(); + request = createPutRepositoryEncryptedRequest(repoName, MeteredRepositoryTypeA.TYPE, keyProviderName, settings, keyProviderType); + verified.set(false); + repositoriesService = createRepositoriesServiceAndMockCryptoClusterState( + clusterService, + repoName, + keyProviderName, + keyProviderType, + settings.build(), + verified, + repositoryMetadata + ); + repositoriesService.registerOrUpdateRepository(request, null); + repository = (MeteredRepositoryTypeA) repositoriesService.repository(repoName); + assertNotNull(repository.cryptoHandler); + assertEquals(kpTypeB, repository.cryptoHandler.kpType); + assertTrue(verified.get()); + + } + + private RepositoriesService createRepositoriesServiceAndMockCryptoClusterState( + ClusterService clusterService, + String repoName, + String keyProviderName, + String keyProviderType, + Settings settings, + AtomicBoolean verified, + List repositoryMetadataList + ) { + + ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); + CryptoMetadata newCryptoMetadata = new CryptoMetadata(keyProviderName, keyProviderType, Settings.EMPTY); + Metadata.Builder mdBuilder = Metadata.builder(); + + RepositoryMetadata newRepositoryMetadata = new RepositoryMetadata( + repoName, + MeteredRepositoryTypeA.TYPE, + Settings.EMPTY, + newCryptoMetadata + ); + if (!repositoryMetadataList.contains(newRepositoryMetadata)) { + repositoryMetadataList.add(newRepositoryMetadata); + } + RepositoriesMetadata newRepositoriesMetadata = new RepositoriesMetadata(repositoryMetadataList); + mdBuilder.putCustom(RepositoriesMetadata.TYPE, newRepositoriesMetadata); + state.metadata(mdBuilder); + ClusterState clusterStateWithRepoTypeA = state.build(); + + RepositoriesService repositoriesService = createRepositoriesServiceWithMockedClusterService(clusterService); + + doAnswer((invocation) -> { + AckedClusterStateUpdateTask task = (AckedClusterStateUpdateTask< + ClusterStateUpdateResponse>) invocation.getArguments()[1]; + ClusterState clusterState = task.execute(clusterStateWithRepoTypeA); + RepositoriesMetadata repositories = clusterState.metadata().custom(RepositoriesMetadata.TYPE); + RepositoryMetadata repositoryMetadata = repositories.repositories().get(repositoryMetadataList.size() - 1); + CryptoMetadata cryptoMetadata = repositoryMetadata.cryptoMetadata(); + assertNotNull(cryptoMetadata); + assertEquals(keyProviderName, cryptoMetadata.keyProviderName()); + assertEquals(keyProviderType, cryptoMetadata.keyProviderType()); + assertEquals(cryptoMetadata.settings(), settings); + verified.set(true); + repositoriesService.applyClusterState(new ClusterChangedEvent("new repo", clusterStateWithRepoTypeA, emptyState())); + return null; + }).when(clusterService).submitStateUpdateTask(any(), any()); + + return 
repositoriesService; } private ClusterState createClusterStateWithRepo(String repoName, String repoType) { @@ -202,13 +492,136 @@ private ClusterState createClusterStateWithRepo(String repoName, String repoType return state.build(); } + private ClusterState createClusterStateWithKeyProvider( + String repoName, + String repoType, + String keyProviderName, + String keyProviderType + ) { + ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); + Metadata.Builder mdBuilder = Metadata.builder(); + CryptoMetadata cryptoMetadata = new CryptoMetadata(keyProviderName, keyProviderType, Settings.EMPTY); + mdBuilder.putCustom( + RepositoriesMetadata.TYPE, + new RepositoriesMetadata(Collections.singletonList(new RepositoryMetadata(repoName, repoType, Settings.EMPTY, cryptoMetadata))) + ); + state.metadata(mdBuilder); + + return state.build(); + } + + private PutRepositoryRequest createPutRepositoryEncryptedRequest( + String repoName, + String repoType, + String keyProviderName, + Settings.Builder settings, + String keyProviderType + ) { + PutRepositoryRequest repositoryRequest = new PutRepositoryRequest(repoName); + repositoryRequest.type(repoType); + repositoryRequest.settings(Settings.EMPTY); + CryptoSettings cryptoSettings = new CryptoSettings(keyProviderName); + cryptoSettings.keyProviderName(keyProviderName); + cryptoSettings.keyProviderType(keyProviderType); + cryptoSettings.settings(settings); + repositoryRequest.cryptoSettings(cryptoSettings); + + return repositoryRequest; + } + private ClusterState emptyState() { return ClusterState.builder(new ClusterName("test")).build(); } private void assertThrowsOnRegister(String repoName) { PutRepositoryRequest request = new PutRepositoryRequest(repoName); - expectThrows(RepositoryException.class, () -> repositoriesService.registerRepository(request, null)); + expectThrows(RepositoryException.class, () -> repositoriesService.registerOrUpdateRepository(request, null)); + } + + private static class TestCryptoProvider implements CryptoHandler { + final String kpName; + final String kpType; + + public TestCryptoProvider(String kpName, String kpType) { + this.kpName = kpName; + this.kpType = kpType; + } + + @Override + public Object initEncryptionMetadata() { + return new Object(); + } + + @Override + public long adjustContentSizeForPartialEncryption(Object cryptoContextObj, long contentSize) { + return 0; + } + + @Override + public long estimateEncryptedLengthOfEntireContent(Object cryptoContextObj, long contentLength) { + return 0; + } + + @Override + public InputStreamContainer createEncryptingStream(Object encryptionMetadata, InputStreamContainer streamContainer) { + return null; + } + + @Override + public InputStreamContainer createEncryptingStreamOfPart( + Object cryptoContextObj, + InputStreamContainer stream, + int totalStreams, + int streamIdx + ) { + return null; + } + + @Override + public InputStream createDecryptingStream(InputStream encryptingStream) { + return null; + } + + @Override + public Object loadEncryptionMetadata(EncryptedHeaderContentSupplier encryptedHeaderContentSupplier) throws IOException { + return null; + } + + @Override + public DecryptedRangedStreamProvider createDecryptingStreamOfRange( + Object cryptoContext, + long startPosOfRawContent, + long endPosOfRawContent + ) { + return null; + } + + @Override + public long estimateDecryptedLength(Object cryptoContext, long contentLength) { + return 0; + } + + @Override + public void close() throws IOException { + + } + } + + private static abstract class 
TestCryptoHandler implements CryptoPlugin<Object, Object> { + private final Settings settings; + + public TestCryptoHandler(Settings settings) { + this.settings = settings; + } + + public CryptoHandler<Object, Object> getOrCreateCryptoHandler( + MasterKeyProvider keyProvider, + String keyProviderName, + String keyProviderType, + Runnable onClose + ) { + return new TestCryptoProvider(keyProviderName, keyProviderType); + } } private static class TestRepository implements Repository { @@ -286,6 +699,16 @@ public long getRestoreThrottleTimeInNanos() { return 0; } + @Override + public long getRemoteUploadThrottleTimeInNanos() { + return 0; + } + + @Override + public long getRemoteDownloadThrottleTimeInNanos() { + return 0; + } + @Override public String startVerification() { return null; @@ -306,6 +729,11 @@ public boolean isReadOnly() { return false; } + @Override + public boolean isSystemRepository() { + return false; + } + @Override public void snapshotShard( Store store, @@ -430,16 +858,19 @@ public void close() { private static class MeteredRepositoryTypeA extends MeteredBlobStoreRepository { private static final String TYPE = "type-a"; private static final RepositoryStats STATS = new RepositoryStats(Map.of("GET", 10L)); + private final TestCryptoProvider cryptoHandler; private MeteredRepositoryTypeA(RepositoryMetadata metadata, ClusterService clusterService) { - super( - metadata, - false, - mock(NamedXContentRegistry.class), - clusterService, - mock(RecoverySettings.class), - Map.of("bucket", "bucket-a") - ); + super(metadata, mock(NamedXContentRegistry.class), clusterService, mock(RecoverySettings.class), Map.of("bucket", "bucket-a")); + + if (metadata.cryptoMetadata() != null) { + cryptoHandler = new TestCryptoProvider( + metadata.cryptoMetadata().keyProviderName(), + metadata.cryptoMetadata().keyProviderType() + ); + } else { + cryptoHandler = null; + } } @Override @@ -461,16 +892,19 @@ public BlobPath basePath() { private static class MeteredRepositoryTypeB extends MeteredBlobStoreRepository { private static final String TYPE = "type-b"; private static final RepositoryStats STATS = new RepositoryStats(Map.of("LIST", 20L)); + private final TestCryptoProvider cryptoHandler; private MeteredRepositoryTypeB(RepositoryMetadata metadata, ClusterService clusterService) { - super( - metadata, - false, - mock(NamedXContentRegistry.class), - clusterService, - mock(RecoverySettings.class), - Map.of("bucket", "bucket-b") - ); + super(metadata, mock(NamedXContentRegistry.class), clusterService, mock(RecoverySettings.class), Map.of("bucket", "bucket-b")); + + if (metadata.cryptoMetadata() != null) { + cryptoHandler = new TestCryptoProvider( + metadata.cryptoMetadata().keyProviderName(), + metadata.cryptoMetadata().keyProviderType() + ); + } else { + cryptoHandler = null; + } } @Override diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java index cf0b06a3f7d16..da0cbcb1d4b17 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesStatsArchiveTests.java @@ -32,7 +32,6 @@ package org.opensearch.repositories; -import org.opensearch.common.UUIDs; import org.opensearch.common.unit.TimeValue; import org.opensearch.test.OpenSearchTestCase; @@ -122,14 +121,11 @@ private RepositoryStatsSnapshot createRepositoryStats(RepositoryStats repository private RepositoryStatsSnapshot
createRepositoryStats(RepositoryStats repositoryStats, long clusterVersion) { RepositoryInfo repositoryInfo = new RepositoryInfo( - UUIDs.randomBase64UUID(), randomAlphaOfLength(10), randomAlphaOfLength(10), - Map.of("bucket", randomAlphaOfLength(10)), - System.currentTimeMillis(), - null + Map.of("bucket", randomAlphaOfLength(10)) ); - return new RepositoryStatsSnapshot(repositoryInfo, repositoryStats, clusterVersion, true); + return new RepositoryStatsSnapshot(repositoryInfo, repositoryStats, clusterVersion); } } diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java index 0dbc0372458b5..d4fd96dddeaa9 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java @@ -59,7 +59,9 @@ protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String rem BlobPath shardLevelBlobPath = remoteStorerepository.basePath().add(indexUUID).add("0").add("segments").add("lock_files"); BlobContainer blobContainer = remoteStorerepository.blobStore().blobContainer(shardLevelBlobPath); try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) { - return Arrays.stream(lockDirectory.listAll()).filter(lock -> lock.endsWith(".lock")).toArray(String[]::new); + return Arrays.stream(lockDirectory.listAll()) + .filter(lock -> lock.endsWith(".lock") || lock.endsWith(".v2_lock")) + .toArray(String[]::new); } } @@ -111,7 +113,7 @@ protected void updateRepository(Client client, String repoName, Settings repoSet createRepository(client, repoName, repoSettings); } - protected Settings getRemoteStoreBackedIndexSettings(String remoteStoreRepo) { + protected Settings getRemoteStoreBackedIndexSettings() { return Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") .put("index.refresh_interval", "300s") diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java index 73479e8f117f3..bed1d0d4d0094 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java @@ -37,53 +37,79 @@ import org.opensearch.client.Client; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.Environment; +import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.index.IndexSettings; import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryData; +import org.opensearch.repositories.fs.FsRepository; import org.opensearch.snapshots.SnapshotId; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; +import java.nio.file.Path; import java.util.Arrays; import java.util.List; +import 
java.util.Locale; import java.util.stream.Collectors; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.hamcrest.Matchers.equalTo; /** * Tests for the {@link BlobStoreRepository} and its subclasses. */ public class BlobStoreRepositoryRemoteIndexTests extends BlobStoreRepositoryHelperTests { - - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); - } - @Override protected Settings nodeSettings() { + Path tempDir = createTempDir(); return Settings.builder() .put(super.nodeSettings()) .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), "test-rs-repo") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "test-rs-repo") + .put(buildRemoteStoreNodeAttributes("test-rs-repo", tempDir.resolve("repo"))) + .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) + .put(Environment.PATH_REPO_SETTING.getKey(), tempDir.resolve("repo")) + .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), tempDir.getParent()) + .build(); + } + + private Settings buildRemoteStoreNodeAttributes(String repoName, Path repoPath) { + String repoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + repoName + ); + String repoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + repoName + ); + + return Settings.builder() + .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName) + .put(repoTypeAttributeKey, FsRepository.TYPE) + .put(repoSettingsAttributeKeyPrefix + "location", repoPath) + .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName) + .put(repoTypeAttributeKey, FsRepository.TYPE) + .put(repoSettingsAttributeKeyPrefix + "location", repoPath) + .put("node.attr." 
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName) + .put(repoTypeAttributeKey, FsRepository.TYPE) + .put(repoSettingsAttributeKeyPrefix + "location", repoPath) + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false) .build(); } // Validate Scenario Normal Snapshot -> remoteStoreShallowCopy Snapshot -> normal Snapshot public void testRetrieveShallowCopySnapshotCase1() throws IOException { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final Client client = client(); final String snapshotRepositoryName = "test-repo"; final String remoteStoreRepositoryName = "test-rs-repo"; @@ -96,13 +122,6 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException { .build(); createRepository(client, snapshotRepositoryName, snapshotRepoSettings); - logger.info("--> creating remote store repository"); - Settings remoteStoreRepoSettings = Settings.builder() - .put(node().settings()) - .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - .build(); - createRepository(client, remoteStoreRepositoryName, remoteStoreRepoSettings); - logger.info("--> creating an index and indexing documents"); final String indexName = "test-idx"; createIndex(indexName); @@ -111,7 +130,7 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException { logger.info("--> creating a remote store enabled index and indexing documents"); final String remoteStoreIndexName = "test-rs-idx"; - Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName); + Settings indexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreIndexName, indexSettings); indexDocuments(client, remoteStoreIndexName); @@ -125,7 +144,7 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException { final SnapshotId snapshotId1 = createSnapshotResponse.getSnapshotInfo().snapshotId(); String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 0) : "there should be no lock files present in directory, but found " + Arrays.toString(lockFiles); + assertEquals("there should be no lock files present in directory, but found " + Arrays.toString(lockFiles), 0, lockFiles.length); logger.info("--> create remote index shallow snapshot"); Settings snapshotRepoSettingsForShallowCopy = Settings.builder() .put(snapshotRepoSettings) @@ -142,8 +161,8 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException { final SnapshotId snapshotId2 = createSnapshotResponse.getSnapshotInfo().snapshotId(); lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles); - assert lockFiles[0].endsWith(snapshotId2.getUUID() + ".lock"); + assertEquals("there should be only one lock file, but found " + Arrays.toString(lockFiles), 1, lockFiles.length); + assertTrue(lockFiles[0].endsWith(snapshotId2.getUUID() + ".v2_lock")); logger.info("--> create another normal snapshot"); updateRepository(client, snapshotRepositoryName, snapshotRepoSettings); @@ -156,8 +175,8 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException { final SnapshotId snapshotId3 = createSnapshotResponse.getSnapshotInfo().snapshotId(); lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles); - assert 
lockFiles[0].endsWith(snapshotId2.getUUID() + ".lock"); + assertEquals("there should be only one lock file, but found " + Arrays.toString(lockFiles), 1, lockFiles.length); + assertTrue(lockFiles[0].endsWith(snapshotId2.getUUID() + ".v2_lock")); logger.info("--> make sure the node's repository can resolve the snapshots"); final List<SnapshotId> originalSnapshots = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); @@ -183,7 +202,6 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException { } public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final Client client = client(); final String snapshotRepositoryName = "test-repo"; final String remoteStoreRepositoryName = "test-rs-repo"; @@ -196,16 +214,9 @@ public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException { .build(); createRepository(client, snapshotRepositoryName, snapshotRepoSettings); - logger.info("--> creating remote store repository"); - Settings remoteStoreRepoSettings = Settings.builder() - .put(node().settings()) - .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - .build(); - createRepository(client, remoteStoreRepositoryName, remoteStoreRepoSettings); - logger.info("--> creating a remote store enabled index and indexing documents"); final String remoteStoreIndexName = "test-rs-idx"; - Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName); + Settings indexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreIndexName, indexSettings); indexDocuments(client, remoteStoreIndexName); @@ -225,8 +236,8 @@ public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException { final SnapshotId snapshotId = createSnapshotResponse.getSnapshotInfo().snapshotId(); String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles); - assert lockFiles[0].endsWith(snapshotId.getUUID() + ".lock"); + assertEquals("there should be only one lock file, but found " + Arrays.toString(lockFiles), 1, lockFiles.length); + assertTrue(lockFiles[0].endsWith(snapshotId.getUUID() + ".v2_lock")); final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName); @@ -246,7 +257,6 @@ public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException { // Validate Scenario remoteStoreShallowCopy Snapshot -> remoteStoreShallowCopy Snapshot // -> remoteStoreShallowCopy Snapshot -> normal snapshot public void testRetrieveShallowCopySnapshotCase2() throws IOException { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final Client client = client(); final String snapshotRepositoryName = "test-repo"; final String remoteStoreRepositoryName = "test-rs-repo"; @@ -267,9 +277,6 @@ public void testRetrieveShallowCopySnapshotCase2() throws IOException { assertFalse(updatedRepositoryMetadata.settings().getAsBoolean(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false)); - logger.info("--> creating remote store repository"); - createRepository(client, remoteStoreRepositoryName); - logger.info("--> creating an index and indexing documents"); final String indexName = "test-idx"; createIndex(indexName); @@ -278,7 +285,7 @@ public void testRetrieveShallowCopySnapshotCase2() throws
IOException { logger.info("--> creating a remote store enabled index and indexing documents"); final String remoteStoreIndexName = "test-rs-idx"; - Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName); + Settings indexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreIndexName, indexSettings); indexDocuments(client, remoteStoreIndexName); @@ -305,8 +312,8 @@ public void testRetrieveShallowCopySnapshotCase2() throws IOException { final SnapshotId snapshotId1 = createSnapshotResponse.getSnapshotInfo().snapshotId(); String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 1) : "lock files are " + Arrays.toString(lockFiles); - assert lockFiles[0].endsWith(snapshotId1.getUUID() + ".lock"); + assertEquals("lock files are " + Arrays.toString(lockFiles), 1, lockFiles.length); + assertTrue(lockFiles[0].endsWith(snapshotId1.getUUID() + ".v2_lock")); logger.info("--> create second remote index shallow snapshot"); createSnapshotResponse = client.admin() @@ -318,10 +325,10 @@ public void testRetrieveShallowCopySnapshotCase2() throws IOException { final SnapshotId snapshotId2 = createSnapshotResponse.getSnapshotInfo().snapshotId(); lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 2) : "lock files are " + Arrays.toString(lockFiles); + assertEquals("lock files are " + Arrays.toString(lockFiles), 2, lockFiles.length); List<SnapshotId> shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2); for (SnapshotId snapshotId : shallowCopySnapshotIDs) { - assert lockFiles[0].contains(snapshotId.getUUID()) || lockFiles[1].contains(snapshotId.getUUID()); + assertTrue(lockFiles[0].contains(snapshotId.getUUID()) || lockFiles[1].contains(snapshotId.getUUID())); } logger.info("--> create third remote index shallow snapshot"); createSnapshotResponse = client.admin() @@ -333,12 +340,14 @@ public void testRetrieveShallowCopySnapshotCase2() throws IOException { final SnapshotId snapshotId3 = createSnapshotResponse.getSnapshotInfo().snapshotId(); lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 3); + assertEquals(3, lockFiles.length); shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); for (SnapshotId snapshotId : shallowCopySnapshotIDs) { - assert lockFiles[0].contains(snapshotId.getUUID()) - || lockFiles[1].contains(snapshotId.getUUID()) - || lockFiles[2].contains(snapshotId.getUUID()); + assertTrue( + lockFiles[0].contains(snapshotId.getUUID()) + || lockFiles[1].contains(snapshotId.getUUID()) + || lockFiles[2].contains(snapshotId.getUUID()) + ); } logger.info("--> create normal snapshot"); createRepository(client, snapshotRepositoryName, snapshotRepoSettings); @@ -351,12 +360,14 @@ public void testRetrieveShallowCopySnapshotCase2() throws IOException { final SnapshotId snapshotId4 = createSnapshotResponse.getSnapshotInfo().snapshotId(); lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 3) : "lock files are " + Arrays.toString(lockFiles); + assertEquals("lock files are " + Arrays.toString(lockFiles), 3, lockFiles.length); shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); for (SnapshotId snapshotId : shallowCopySnapshotIDs) { - assert lockFiles[0].contains(snapshotId.getUUID()) - || lockFiles[1].contains(snapshotId.getUUID()) - ||
lockFiles[2].contains(snapshotId.getUUID()); + assertTrue( + lockFiles[0].contains(snapshotId.getUUID()) + || lockFiles[1].contains(snapshotId.getUUID()) + || lockFiles[2].contains(snapshotId.getUUID()) + ); } logger.info("--> make sure the node's repository can resolve the snapshots"); diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index bb3098153930d..1e86142d701aa 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -41,7 +41,6 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; @@ -105,11 +104,6 @@ protected void assertSnapshotOrGenericThread() { } } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); - } - public void testRetrieveSnapshots() throws Exception { final Client client = client(); final Path location = OpenSearchIntegTestCase.randomRepoPath(node().settings()); @@ -166,8 +160,6 @@ public void testRetrieveSnapshots() throws Exception { assertThat(snapshotIds, equalTo(originalSnapshots)); } - // Validate Scenario remoteStoreShallowCopy Snapshot -> remoteStoreShallowCopy Snapshot - // -> remoteStoreShallowCopy Snapshot -> normal snapshot public void testReadAndWriteSnapshotsThroughIndexFile() throws Exception { final BlobStoreRepository repository = setupRepo(); final long pendingGeneration = repository.metadata.pendingGeneration(); @@ -260,7 +252,7 @@ public void testBadChunksize() throws Exception { ); } - public void testFsRepositoryCompressDeprecated() { + public void testFsRepositoryCompressDeprecatedIgnored() { final Path location = OpenSearchIntegTestCase.randomRepoPath(node().settings()); final Settings settings = Settings.builder().put(node().settings()).put("location", location).build(); final RepositoryMetadata metadata = new RepositoryMetadata("test-repo", REPO_TYPE, settings); @@ -273,10 +265,7 @@ public void testFsRepositoryCompressDeprecated() { new FsRepository(metadata, useCompressEnvironment, null, BlobStoreTestUtil.mockClusterService(), null); - assertWarnings( - "[repositories.fs.compress] setting was deprecated in OpenSearch and will be removed in a future release!" - + " See the breaking changes documentation for the next major version." 
- ); + assertNoDeprecationWarnings(); } private static void writeIndexGen(BlobStoreRepository repository, RepositoryData repositoryData, long generation) throws Exception { diff --git a/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java index 303f60283f69f..d9f599714805b 100644 --- a/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/fs/FsRepositoryTests.java @@ -58,6 +58,7 @@ import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.index.shard.ShardId; @@ -70,6 +71,7 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.repositories.IndexId; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.BlobStoreTestUtil; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; @@ -90,6 +92,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.is; public class FsRepositoryTests extends OpenSearchTestCase { @@ -218,6 +221,31 @@ public void testSnapshotAndRestore() throws IOException, InterruptedException { } } + public void testRestrictedSettingsDefault() { + Path repo = createTempDir(); + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath()) + .put("location", repo) + .put(FsRepository.BASE_PATH_SETTING.getKey(), "my_base_path") + .build(); + RepositoryMetadata metadata = new RepositoryMetadata("test", "fs", settings); + FsRepository repository = new FsRepository( + metadata, + new Environment(settings, null), + NamedXContentRegistry.EMPTY, + BlobStoreTestUtil.mockClusterService(), + new RecoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) + ); + + List<Setting<?>> restrictedSettings = repository.getRestrictedSystemRepositorySettings(); + assertThat(restrictedSettings.size(), is(4)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.READONLY_SETTING)); + assertTrue(restrictedSettings.contains(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY)); + assertTrue(restrictedSettings.contains(FsRepository.LOCATION_SETTING)); + } + private void runGeneric(ThreadPool threadPool, Runnable runnable) throws InterruptedException { CountDownLatch latch = new CountDownLatch(1); threadPool.generic().submit(() -> { diff --git a/server/src/test/java/org/opensearch/repositories/fs/ReloadableFsRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/fs/ReloadableFsRepositoryTests.java new file mode 100644 index 0000000000000..db2cf9c3e9582 --- /dev/null +++ b/server/src/test/java/org/opensearch/repositories/fs/ReloadableFsRepositoryTests.java @@ -0,0 +1,119 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a +
* compatible open source license. + */ + +package org.opensearch.repositories.fs; + +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.compress.DeflateCompressor; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.compress.ZstdCompressor; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.compress.CompressorRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.repositories.blobstore.BlobStoreTestUtil; +import org.opensearch.test.OpenSearchTestCase; + +import java.nio.file.Path; +import java.util.Locale; + +public class ReloadableFsRepositoryTests extends OpenSearchTestCase { + ReloadableFsRepository repository; + RepositoryMetadata metadata; + Settings settings; + Path repo; + + @Override + public void setUp() throws Exception { + super.setUp(); + + repo = createTempDir(); + settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath()) + .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()) + .put("location", repo) + .put("compress", false) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(FsRepository.BASE_PATH_SETTING.getKey(), "my_base_path") + .build(); + metadata = new RepositoryMetadata("test", "fs", settings); + repository = new ReloadableFsRepository( + metadata, + new Environment(settings, null), + NamedXContentRegistry.EMPTY, + BlobStoreTestUtil.mockClusterService(), + new RecoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) + ); + } + + /** + * Validates that {@link ReloadableFsRepository} supports inplace reloading + */ + public void testIsReloadable() { + assertTrue(repository.isReloadable()); + } + + /** + * Updates repository metadata of an existing repository to enable default compressor + */ + public void testCompressReload() { + assertEquals(CompressorRegistry.none(), repository.getCompressor()); + updateCompressionTypeToDefault(); + repository.validateMetadata(metadata); + repository.reload(metadata); + assertEquals(CompressorRegistry.defaultCompressor(), repository.getCompressor()); + } + + /** + * Updates repository metadata of an existing repository to change compressor type from default to Zstd + */ + public void testCompressionTypeReload() { + assertEquals(CompressorRegistry.none(), repository.getCompressor()); + updateCompressionTypeToDefault(); + repository = new ReloadableFsRepository( + metadata, + new Environment(settings, null), + NamedXContentRegistry.EMPTY, + BlobStoreTestUtil.mockClusterService(), + new RecoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) + ); + assertEquals(CompressorRegistry.defaultCompressor(), repository.getCompressor()); + + settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath()) + .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()) + .put("location", repo) + .put("compress", true) + .put("compression_type", ZstdCompressor.NAME.toLowerCase(Locale.ROOT)) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(FsRepository.BASE_PATH_SETTING.getKey(), 
"my_base_path") + .build(); + metadata = new RepositoryMetadata("test", "fs", settings); + repository.validateMetadata(metadata); + repository.reload(metadata); + assertEquals(CompressorRegistry.getCompressor(ZstdCompressor.NAME.toUpperCase(Locale.ROOT)), repository.getCompressor()); + } + + private void updateCompressionTypeToDefault() { + settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), repo.toAbsolutePath()) + .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()) + .put("location", repo) + .put("compress", true) + .put("compression_type", DeflateCompressor.NAME.toLowerCase(Locale.ROOT)) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(FsRepository.BASE_PATH_SETTING.getKey(), "my_base_path") + .build(); + metadata = new RepositoryMetadata("test", "fs", settings); + } +} diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationActionTests.java index 86cbe2fd991c7..41ad9e8bcbb44 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestCatSegmentReplicationActionTests.java @@ -84,6 +84,7 @@ public void testSegmentReplicationAction() throws IOException { 0L, 0L, 0L, + 0L, 0L ); segmentReplicationShardStats.setCurrentReplicationState(state); @@ -141,7 +142,7 @@ public void testSegmentReplicationAction() throws IOException { currentReplicationState.getTargetNode().getHostName(), shardStats.getCheckpointsBehindCount(), new ByteSizeValue(shardStats.getBytesBehindCount()), - new TimeValue(shardStats.getCurrentReplicationTimeMillis()), + new TimeValue(shardStats.getCurrentReplicationLagMillis()), new TimeValue(shardStats.getLastCompletedReplicationTimeMillis()), rejectedRequestCount ); diff --git a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java index 18b35341e0de1..d160277808300 100644 --- a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java @@ -321,9 +321,9 @@ public void testCreatePitMoreThanMaxOpenPitContexts() throws Exception { // deleteall DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds.toArray(new String[pitIds.size()])); - /** - * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context - * not found exceptions don't result in failures ( as deletion in one node is successful ) + /* + When we invoke delete again, returns success after clearing the remaining readers. 
Asserting reader context + not found exceptions don't result in failures ( as deletion in one node is successful ) */ ActionFuture<DeletePitResponse> execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); @@ -489,8 +489,8 @@ public void testPitAfterUpdateIndex() throws Exception { client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get().getHits().getTotalHits().value, Matchers.equalTo(50L) ); - /** - * assert without point in time + /* + assert without point in time */ assertThat( @@ -509,8 +509,8 @@ public void testPitAfterUpdateIndex() throws Exception { client().prepareSearch().setSize(0).setQuery(termQuery("message", "update")).get().getHits().getTotalHits().value, Matchers.equalTo(50L) ); - /** - * using point in time id will have the same search results as ones before update + /* + using point in time id will have the same search results as ones before update */ assertThat( client().prepareSearch() diff --git a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java index f569fe3b63af0..347011af98c6d 100644 --- a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java @@ -40,16 +40,21 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.Version; import org.opensearch.action.OriginalIndices; import org.opensearch.action.search.SearchType; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; @@ -75,6 +80,7 @@ import org.opensearch.search.rescore.RescoreContext; import org.opensearch.search.slice.SliceBuilder; import org.opensearch.search.sort.SortAndFormats; +import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -547,6 +553,159 @@ protected Engine.Searcher acquireSearcherInternal(String source) { } } + public void testSearchPathEvaluationUsingSortField() throws Exception { + // enable the concurrent set FeatureFlag + FeatureFlagSetter.set(FeatureFlags.CONCURRENT_SEGMENT_SEARCH); + ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class); + when(shardSearchRequest.searchType()).thenReturn(SearchType.DEFAULT); + ShardId shardId = new ShardId("index", UUID.randomUUID().toString(), 1); + when(shardSearchRequest.shardId()).thenReturn(shardId); + + ThreadPool threadPool = new TestThreadPool(this.getClass().getName()); + IndexShard indexShard = mock(IndexShard.class); + QueryCachingPolicy queryCachingPolicy = mock(QueryCachingPolicy.class); +
when(indexShard.getQueryCachingPolicy()).thenReturn(queryCachingPolicy); + when(indexShard.getThreadPool()).thenReturn(threadPool); + + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .build(); + + IndexService indexService = mock(IndexService.class); + QueryShardContext queryShardContext = mock(QueryShardContext.class); + when(indexService.newQueryShardContext(eq(shardId.id()), any(), any(), nullable(String.class), anyBoolean())).thenReturn( + queryShardContext + ); + + IndexMetadata indexMetadata = IndexMetadata.builder("index").settings(settings).build(); + IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); + when(indexService.getIndexSettings()).thenReturn(indexSettings); + + BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + + try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + + final Supplier<Engine.SearcherSupplier> searcherSupplier = () -> new Engine.SearcherSupplier(Function.identity()) { + @Override + protected void doClose() {} + + @Override + protected Engine.Searcher acquireSearcherInternal(String source) { + try { + IndexReader reader = w.getReader(); + return new Engine.Searcher( + "test", + reader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + reader + ); + } catch (IOException exc) { + throw new AssertionError(exc); + } + } + }; + + SearchShardTarget target = new SearchShardTarget("node", shardId, null, OriginalIndices.NONE); + ReaderContext readerContext = new ReaderContext( + newContextId(), + indexService, + indexShard, + searcherSupplier.get(), + randomNonNegativeLong(), + false + ); + + final ClusterService clusterService = mock(ClusterService.class); + final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + clusterSettings.registerSetting(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING); + clusterSettings.applySettings( + Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() + ); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + DefaultSearchContext context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + null, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null + ); + + // Case1: if sort is on timestamp field, non-concurrent path is used + context.sort( + new SortAndFormats(new Sort(new SortField("@timestamp", SortField.Type.INT)), new DocValueFormat[] { DocValueFormat.RAW }) + ); + context.evaluateRequestShouldUseConcurrentSearch(); + assertFalse(context.shouldUseConcurrentSearch()); + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + + // Case2: if sort is on other field, concurrent path is used + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + clusterService, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null + ); + context.sort( + new SortAndFormats(new Sort(new SortField("test2", SortField.Type.INT)), new DocValueFormat[] { DocValueFormat.RAW }) + ); + context.evaluateRequestShouldUseConcurrentSearch(); + if (executor == null) {
assertFalse(context.shouldUseConcurrentSearch()); + } else { + assertTrue(context.shouldUseConcurrentSearch()); + } + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + + // Case 3: With no sort, concurrent path is used + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + clusterService, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null + ); + context.evaluateRequestShouldUseConcurrentSearch(); + if (executor == null) { + assertFalse(context.shouldUseConcurrentSearch()); + } else { + assertTrue(context.shouldUseConcurrentSearch()); + } + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + + // shutdown the threadpool + threadPool.shutdown(); + } + } + private ShardSearchContextId newContextId() { return new ShardSearchContextId(UUIDs.randomBase64UUID(), randomNonNegativeLong()); } diff --git a/server/src/test/java/org/opensearch/search/GenericSearchExtBuilderTests.java b/server/src/test/java/org/opensearch/search/GenericSearchExtBuilderTests.java new file mode 100644 index 0000000000000..8fb1814962155 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/GenericSearchExtBuilderTests.java @@ -0,0 +1,422 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.search; + +import org.opensearch.Version; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.search.SearchResponseTests; +import org.opensearch.action.search.ShardSearchFailure; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.plugins.SearchPlugin; +import org.opensearch.rest.action.search.RestSearchAction; +import org.opensearch.search.aggregations.AggregationsTests; +import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.search.profile.SearchProfileShardResults; +import org.opensearch.search.profile.SearchProfileShardResultsTests; +import org.opensearch.search.suggest.Suggest; +import org.opensearch.search.suggest.SuggestTests; +import org.opensearch.test.InternalAggregationTestCase; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.UUID; + +import static java.util.Collections.singletonMap; + +public class GenericSearchExtBuilderTests extends OpenSearchTestCase { + + private static final NamedXContentRegistry xContentRegistry; + static { + List<NamedXContentRegistry.Entry> namedXContents
= new ArrayList<>(InternalAggregationTestCase.getDefaultNamedXContents()); + namedXContents.addAll(SuggestTests.getDefaultNamedXContents()); + namedXContents.add( + new NamedXContentRegistry.Entry( + SearchExtBuilder.class, + GenericSearchExtBuilder.EXT_BUILDER_NAME, + GenericSearchExtBuilder::fromXContent + ) + ); + xContentRegistry = new NamedXContentRegistry(namedXContents); + } + + private final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry( + new SearchModule(Settings.EMPTY, List.of(new SearchPlugin() { + @Override + public List<SearchExtSpec<?>> getSearchExts() { + return List.of( + new SearchExtSpec<>( + GenericSearchExtBuilder.EXT_BUILDER_NAME, + GenericSearchExtBuilder::new, + GenericSearchExtBuilder::fromXContent + ) + ); + } + })).getNamedWriteables() + ); + + @Override + protected NamedXContentRegistry xContentRegistry() { + return xContentRegistry; + } + + SearchResponseTests srt = new SearchResponseTests(); + private AggregationsTests aggregationsTests = new AggregationsTests(); + + @Before + public void init() throws Exception { + aggregationsTests.init(); + } + + @After + public void cleanUp() throws Exception { + aggregationsTests.cleanUp(); + } + + public void testFromXContentWithUnregisteredSearchExtBuilders() throws IOException { + List<NamedXContentRegistry.Entry> namedXContents = new ArrayList<>(InternalAggregationTestCase.getDefaultNamedXContents()); + namedXContents.addAll(SuggestTests.getDefaultNamedXContents()); + String dummyId = UUID.randomUUID().toString(); + List<SearchExtBuilder> extBuilders = List.of( + new SimpleValueSearchExtBuilder(dummyId), + new MapSearchExtBuilder(Map.of("x", "y", "a", "b")), + new ListSearchExtBuilder(List.of("1", "2", "3")) + ); + SearchResponse response = srt.createTestItem(false, extBuilders); + MediaType xcontentType = randomFrom(XContentType.values()); + boolean humanReadable = randomBoolean(); + final ToXContent.Params params = new ToXContent.MapParams(singletonMap(RestSearchAction.TYPED_KEYS_PARAM, "true")); + BytesReference originalBytes = toShuffledXContent(response, xcontentType, params, humanReadable); + XContentParser parser = createParser(new NamedXContentRegistry(namedXContents), xcontentType.xContent(), originalBytes); + SearchResponse parsed = SearchResponse.fromXContent(parser); + assertEquals(extBuilders.size(), response.getInternalResponse().getSearchExtBuilders().size()); + + List<SearchExtBuilder> actual = parsed.getInternalResponse().getSearchExtBuilders(); + assertEquals(extBuilders.size(), actual.size()); + for (int i = 0; i < actual.size(); i++) { + assertTrue(actual.get(0) instanceof GenericSearchExtBuilder); + } + } + + // This test case fails because GenericSearchExtBuilder does not retain the name of the SearchExtBuilder that it is replacing. + // GenericSearchExtBuilder has its own "generic_ext" section name.
+ // public void testFromXContentWithSearchExtBuilders() throws IOException { + // String dummyId = UUID.randomUUID().toString(); + // srt.doFromXContentTestWithRandomFields(createTestItem(false, List.of(new SimpleValueSearchExtBuilder(dummyId))), false); + // } + + public void testFromXContentWithGenericSearchExtBuildersForSimpleValues() throws IOException { + String dummyId = UUID.randomUUID().toString(); + srt.doFromXContentTestWithRandomFields( + createTestItem(false, List.of(new GenericSearchExtBuilder(dummyId, GenericSearchExtBuilder.ValueType.SIMPLE))), + false + ); + } + + public void testFromXContentWithGenericSearchExtBuildersForMapValues() throws IOException { + srt.doFromXContentTestWithRandomFields( + createTestItem(false, List.of(new GenericSearchExtBuilder(Map.of("x", "y", "a", "b"), GenericSearchExtBuilder.ValueType.MAP))), + false + ); + } + + public void testFromXContentWithGenericSearchExtBuildersForListValues() throws IOException { + String dummyId = UUID.randomUUID().toString(); + srt.doFromXContentTestWithRandomFields( + createTestItem(false, List.of(new GenericSearchExtBuilder(List.of("1", "2", "3"), GenericSearchExtBuilder.ValueType.LIST))), + false + ); + } + + public void testSerializationWithGenericSearchExtBuildersForSimpleValues() throws IOException { + String id = UUID.randomUUID().toString(); + SearchResponse searchResponse = createTestItem( + false, + List.of(new GenericSearchExtBuilder(id, GenericSearchExtBuilder.ValueType.SIMPLE)) + ); + SearchResponse deserialized = copyWriteable(searchResponse, namedWriteableRegistry, SearchResponse::new, Version.CURRENT); + if (searchResponse.getHits().getTotalHits() == null) { + assertNull(deserialized.getHits().getTotalHits()); + } else { + assertEquals(searchResponse.getHits().getTotalHits().value, deserialized.getHits().getTotalHits().value); + assertEquals(searchResponse.getHits().getTotalHits().relation, deserialized.getHits().getTotalHits().relation); + } + assertEquals(searchResponse.getHits().getHits().length, deserialized.getHits().getHits().length); + assertEquals(searchResponse.getNumReducePhases(), deserialized.getNumReducePhases()); + assertEquals(searchResponse.getFailedShards(), deserialized.getFailedShards()); + assertEquals(searchResponse.getTotalShards(), deserialized.getTotalShards()); + assertEquals(searchResponse.getSkippedShards(), deserialized.getSkippedShards()); + assertEquals(searchResponse.getClusters(), deserialized.getClusters()); + assertEquals( + searchResponse.getInternalResponse().getSearchExtBuilders().get(0), + deserialized.getInternalResponse().getSearchExtBuilders().get(0) + ); + } + + public void testSerializationWithGenericSearchExtBuildersForMapValues() throws IOException { + SearchResponse searchResponse = createTestItem( + false, + List.of(new GenericSearchExtBuilder(Map.of("x", "y", "a", "b"), GenericSearchExtBuilder.ValueType.MAP)) + ); + SearchResponse deserialized = copyWriteable(searchResponse, namedWriteableRegistry, SearchResponse::new, Version.CURRENT); + if (searchResponse.getHits().getTotalHits() == null) { + assertNull(deserialized.getHits().getTotalHits()); + } else { + assertEquals(searchResponse.getHits().getTotalHits().value, deserialized.getHits().getTotalHits().value); + assertEquals(searchResponse.getHits().getTotalHits().relation, deserialized.getHits().getTotalHits().relation); + } + assertEquals(searchResponse.getHits().getHits().length, deserialized.getHits().getHits().length); + assertEquals(searchResponse.getNumReducePhases(), 
deserialized.getNumReducePhases()); + assertEquals(searchResponse.getFailedShards(), deserialized.getFailedShards()); + assertEquals(searchResponse.getTotalShards(), deserialized.getTotalShards()); + assertEquals(searchResponse.getSkippedShards(), deserialized.getSkippedShards()); + assertEquals(searchResponse.getClusters(), deserialized.getClusters()); + assertEquals( + searchResponse.getInternalResponse().getSearchExtBuilders().get(0), + deserialized.getInternalResponse().getSearchExtBuilders().get(0) + ); + } + + public void testSerializationWithGenericSearchExtBuildersForListValues() throws IOException { + SearchResponse searchResponse = createTestItem( + false, + List.of(new GenericSearchExtBuilder(List.of("1", "2", "3"), GenericSearchExtBuilder.ValueType.LIST)) + ); + SearchResponse deserialized = copyWriteable(searchResponse, namedWriteableRegistry, SearchResponse::new, Version.CURRENT); + if (searchResponse.getHits().getTotalHits() == null) { + assertNull(deserialized.getHits().getTotalHits()); + } else { + assertEquals(searchResponse.getHits().getTotalHits().value, deserialized.getHits().getTotalHits().value); + assertEquals(searchResponse.getHits().getTotalHits().relation, deserialized.getHits().getTotalHits().relation); + } + assertEquals(searchResponse.getHits().getHits().length, deserialized.getHits().getHits().length); + assertEquals(searchResponse.getNumReducePhases(), deserialized.getNumReducePhases()); + assertEquals(searchResponse.getFailedShards(), deserialized.getFailedShards()); + assertEquals(searchResponse.getTotalShards(), deserialized.getTotalShards()); + assertEquals(searchResponse.getSkippedShards(), deserialized.getSkippedShards()); + assertEquals(searchResponse.getClusters(), deserialized.getClusters()); + assertEquals( + searchResponse.getInternalResponse().getSearchExtBuilders().get(0), + deserialized.getInternalResponse().getSearchExtBuilders().get(0) + ); + } + + public SearchResponse createTestItem( + boolean minimal, + List<SearchExtBuilder> searchExtBuilders, + ShardSearchFailure... shardSearchFailures + ) { + boolean timedOut = randomBoolean(); + Boolean terminatedEarly = randomBoolean() ? null : randomBoolean(); + int numReducePhases = randomIntBetween(1, 10); + long tookInMillis = randomNonNegativeLong(); + int totalShards = randomIntBetween(1, Integer.MAX_VALUE); + int successfulShards = randomIntBetween(0, totalShards); + int skippedShards = randomIntBetween(0, totalShards); + InternalSearchResponse internalSearchResponse; + if (minimal == false) { + SearchHits hits = SearchHitsTests.createTestItem(true, true); + InternalAggregations aggregations = aggregationsTests.createTestInstance(); + Suggest suggest = SuggestTests.createTestItem(); + SearchProfileShardResults profileShardResults = SearchProfileShardResultsTests.createTestItem(); + internalSearchResponse = new InternalSearchResponse( + hits, + aggregations, + suggest, + profileShardResults, + timedOut, + terminatedEarly, + numReducePhases, + searchExtBuilders + ); + } else { + internalSearchResponse = InternalSearchResponse.empty(); + } + + return new SearchResponse( + internalSearchResponse, + null, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + shardSearchFailures, + randomBoolean() ?
randomClusters() : SearchResponse.Clusters.EMPTY, + null + ); + } + + static SearchResponse.Clusters randomClusters() { + int totalClusters = randomIntBetween(0, 10); + int successfulClusters = randomIntBetween(0, totalClusters); + int skippedClusters = totalClusters - successfulClusters; + return new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters); + } + + static class SimpleValueSearchExtBuilder extends SearchExtBuilder { + + static ParseField FIELD = new ParseField("simple_value"); + + private final String id; + + public SimpleValueSearchExtBuilder(String id) { + assertNotNull(id); + this.id = id; + } + + public SimpleValueSearchExtBuilder(StreamInput in) throws IOException { + this.id = in.readString(); + } + + public String getId() { + return this.id; + } + + @Override + public String getWriteableName() { + return FIELD.getPreferredName(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(this.id); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(FIELD.getPreferredName(), id); + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + + if (!(obj instanceof SimpleValueSearchExtBuilder)) { + return false; + } + + return this.id.equals(((SimpleValueSearchExtBuilder) obj).getId()); + } + + public static SimpleValueSearchExtBuilder parse(XContentParser parser) throws IOException { + String id; + XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.VALUE_STRING) { + id = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), "Expected a VALUE_STRING but got " + token); + } + if (id == null) { + throw new ParsingException(parser.getTokenLocation(), "no id specified for " + FIELD.getPreferredName()); + } + return new SimpleValueSearchExtBuilder(id); + } + } + + static class MapSearchExtBuilder extends SearchExtBuilder { + + private final static String EXT_FIELD = "map0"; + + private final Map<String, Object> map; + + public MapSearchExtBuilder(Map<String, Object> map) { + this.map = new HashMap<>(); + for (Map.Entry<String, Object> e : map.entrySet()) { + this.map.put(e.getKey(), e.getValue()); + } + } + + @Override + public String getWriteableName() { + return EXT_FIELD; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(this.map); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(EXT_FIELD, this.map); + } + + @Override + public int hashCode() { + return Objects.hash(this.getClass(), this.map); + } + + @Override + public boolean equals(Object obj) { + return false; + } + } + + static class ListSearchExtBuilder extends SearchExtBuilder { + + private final static String EXT_FIELD = "list0"; + + private final List<String> list; + + public ListSearchExtBuilder(List<String> list) { + this.list = new ArrayList<>(); + list.forEach(e -> this.list.add(e)); + } + + @Override + public String getWriteableName() { + return EXT_FIELD; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(this.list, StreamOutput::writeString); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field(EXT_FIELD, this.list); + } + + @Override + public int hashCode() { + return Objects.hash(this.getClass(), this.list);
+ } + + @Override + public boolean equals(Object obj) { + return false; + } + } +} diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index bfeb1d51a7578..390f032ef4a79 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -1273,7 +1273,8 @@ public void testConcurrentSegmentSearchSearchContext() throws IOException { .get() .getSetting(index, IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey()) ); - assertEquals(concurrentSearchEnabled, searchContext.isConcurrentSegmentSearchEnabled()); + searchContext.evaluateRequestShouldUseConcurrentSearch(); + assertEquals(concurrentSearchEnabled, searchContext.shouldUseConcurrentSearch()); // verify executor nullability with concurrent search enabled/disabled if (concurrentSearchEnabled) { assertNotNull(searchContext.searcher().getExecutor()); @@ -1327,7 +1328,8 @@ public void testConcurrentSegmentSearchIsSetOnceDuringContextCreation() throws I .get(); try (DefaultSearchContext searchContext = service.createSearchContext(request, new TimeValue(System.currentTimeMillis()))) { // verify concurrent search state in context - assertEquals(concurrentSearchSetting, searchContext.isConcurrentSegmentSearchEnabled()); + searchContext.evaluateRequestShouldUseConcurrentSearch(); + assertEquals(concurrentSearchSetting, searchContext.shouldUseConcurrentSearch()); // verify executor state in searcher assertEquals(concurrentSearchSetting, (searchContext.searcher().getExecutor() != null)); @@ -1341,7 +1343,7 @@ public void testConcurrentSegmentSearchIsSetOnceDuringContextCreation() throws I .get(); // verify that concurrent segment search is still set to same expected value for the context - assertEquals(concurrentSearchSetting, searchContext.isConcurrentSegmentSearchEnabled()); + assertEquals(concurrentSearchSetting, searchContext.shouldUseConcurrentSearch()); } } @@ -1753,7 +1755,7 @@ public void testCanMatchSearchAfterAscGreaterThanMax() throws IOException { MinAndMax minMax = new MinAndMax(0L, 9L); FieldSortBuilder primarySort = new FieldSortBuilder("test"); primarySort.order(SortOrder.ASC); - assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), false); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, SearchContext.TRACK_TOTAL_HITS_DISABLED), false); } /** @@ -1766,7 +1768,7 @@ public void testCanMatchSearchAfterAscLessThanMax() throws IOException { MinAndMax minMax = new MinAndMax(0L, 9L); FieldSortBuilder primarySort = new FieldSortBuilder("test"); primarySort.order(SortOrder.ASC); - assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), true); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, SearchContext.TRACK_TOTAL_HITS_DISABLED), true); } /** @@ -1779,7 +1781,7 @@ public void testCanMatchSearchAfterAscEqualMax() throws IOException { MinAndMax minMax = new MinAndMax(0L, 9L); FieldSortBuilder primarySort = new FieldSortBuilder("test"); primarySort.order(SortOrder.ASC); - assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), true); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, SearchContext.TRACK_TOTAL_HITS_DISABLED), true); } /** @@ -1792,7 +1794,7 @@ public void testCanMatchSearchAfterDescGreaterThanMin() throws IOException { MinAndMax minMax = new MinAndMax(0L, 9L); 
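// note: canMatchSearchAfter now takes the request's track_total_hits threshold as a fourth
// argument; TRACK_TOTAL_HITS_DISABLED (used in the updated assertions in these tests) keeps
// the original min/max pruning, while an accurate-count setting forces canMatch to true, as
// the new testCanMatchSearchAfterDescLessThanMinWithTrackTotalhits added below demonstrates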
FieldSortBuilder primarySort = new FieldSortBuilder("test"); primarySort.order(SortOrder.DESC); - assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), true); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, SearchContext.TRACK_TOTAL_HITS_DISABLED), true); } /** @@ -1805,7 +1807,7 @@ public void testCanMatchSearchAfterDescLessThanMin() throws IOException { MinAndMax minMax = new MinAndMax(0L, 9L); FieldSortBuilder primarySort = new FieldSortBuilder("test"); primarySort.order(SortOrder.DESC); - assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), false); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, SearchContext.TRACK_TOTAL_HITS_DISABLED), false); } /** @@ -1818,7 +1820,7 @@ public void testCanMatchSearchAfterDescEqualMin() throws IOException { MinAndMax minMax = new MinAndMax(0L, 9L); FieldSortBuilder primarySort = new FieldSortBuilder("test"); primarySort.order(SortOrder.DESC); - assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), true); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, SearchContext.TRACK_TOTAL_HITS_DISABLED), true); } /** @@ -1832,9 +1834,24 @@ public void testCanMatchSearchAfterWithMissing() throws IOException { FieldSortBuilder primarySort = new FieldSortBuilder("test"); primarySort.order(SortOrder.DESC); // Should be false without missing values - assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), false); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, SearchContext.TRACK_TOTAL_HITS_DISABLED), false); primarySort.missing("_last"); // Should be true with missing values - assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), true); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, SearchContext.TRACK_TOTAL_HITS_DISABLED), true); + } + + /** + * Test for DESC order search_after query with track_total_hits=true. + * Min = 0L, Max = 9L, search_after = -1L + * With above min/max and search_after, it should not match, but since + * track_total_hits = true, + * Expected result is canMatch = true + */ + public void testCanMatchSearchAfterDescLessThanMinWithTrackTotalhits() throws IOException { + FieldDoc searchAfter = new FieldDoc(0, 0, new Long[] { -1L }); + MinAndMax minMax = new MinAndMax(0L, 9L); + FieldSortBuilder primarySort = new FieldSortBuilder("test"); + primarySort.order(SortOrder.DESC); + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort, 1000), true); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregatorBaseTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregatorBaseTests.java index 728b7162bf478..ce96623ea06df 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregatorBaseTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregatorBaseTests.java @@ -135,7 +135,7 @@ private ValuesSourceConfig getVSConfig( indexed, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), resolution, null, Collections.emptyMap() @@ -184,7 +184,7 @@ public void testShortcutIsApplicable() throws IOException { assertNull(pointReaderShim(mockSearchContext(null), null, getVSConfig("number", resolution, false, context))); } // Check that we decode a dates "just like" the doc values instance. 
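// (the static DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER constant is replaced by the
// getDefaultDateTimeFormatter() accessor here and in the other aggregation tests below)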
- Instant expected = Instant.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2020-01-01T00:00:00Z")); + Instant expected = Instant.from(DateFieldMapper.getDefaultDateTimeFormatter().parse("2020-01-01T00:00:00Z")); byte[] scratch = new byte[8]; LongPoint.encodeDimension(DateFieldMapper.Resolution.MILLISECONDS.convert(expected), scratch, 0); assertThat( diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/DateScriptMocksPlugin.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/DateScriptMocksPlugin.java index 5a9a9e2b6cb51..d6ba4eedd3a19 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/DateScriptMocksPlugin.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/DateScriptMocksPlugin.java @@ -43,7 +43,7 @@ /** * Mock scripts shared by DateRangeIT and DateHistogramIT. - * + *
<p>
            * Provides {@link DateScriptMocksPlugin#EXTRACT_FIELD}, {@link DateScriptMocksPlugin#DOUBLE_PLUS_ONE_MONTH}, * and {@link DateScriptMocksPlugin#LONG_PLUS_ONE_MONTH} scripts. */ diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java index d6981d1c34652..21d05305eed1b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/ShardSizeTestCase.java @@ -32,21 +32,44 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.is; -public abstract class ShardSizeTestCase extends OpenSearchIntegTestCase { +public abstract class ShardSizeTestCase extends ParameterizedOpenSearchIntegTestCase { + + public ShardSizeTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected int numberOfShards() { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java index aa51f9b11ea19..39054f15826f0 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java @@ -44,7 +44,7 @@ /** * Duplicates the tests from {@link CompositeAggregationBuilderTests}, except using the deprecated * interval on date histo. Separated to make testing the warnings easier. - * + *
<p>
            * Can be removed in when the legacy interval options are gone */ public class LegacyIntervalCompositeAggBuilderTests extends BaseAggregationTestCase { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTestCase.java index f5f4f3d4b0723..fdbc0160e51a3 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTestCase.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTestCase.java @@ -125,7 +125,7 @@ protected final DateFieldMapper.DateFieldType aggregableDateFieldType(boolean us isSearchable, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), useNanosecondResolution ? DateFieldMapper.Resolution.NANOSECONDS : DateFieldMapper.Resolution.MILLISECONDS, null, Collections.emptyMap() diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java index 4e8dc20c465a9..bca6623e66104 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java @@ -1245,7 +1245,7 @@ private void testSearchCase( } private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } private static long asLong(String dateTime, DateFieldMapper.DateFieldType fieldType) { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java index 08adc6e3d550a..5c12d070824f2 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java @@ -1086,7 +1086,7 @@ private void testCase( } private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } private static ZonedDateTime asZDT(String dateTime) { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java index 26ccc1075220b..96c8be1a25cc3 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/DateRangeAggregatorTests.java @@ -273,7 +273,7 @@ private void testCase( true, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), resolution, null, Collections.emptyMap() diff --git 
a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorTests.java index 761615ad1d7eb..dd7ae915c3b45 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/RangeAggregatorTests.java @@ -136,7 +136,7 @@ public void testDateFieldNanosecondResolution() throws IOException { true, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), DateFieldMapper.Resolution.NANOSECONDS, null, Collections.emptyMap() @@ -167,7 +167,7 @@ public void testMissingDateWithDateField() throws IOException { true, false, true, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, + DateFieldMapper.getDefaultDateTimeFormatter(), DateFieldMapper.Resolution.NANOSECONDS, null, Collections.emptyMap() diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java index 21453bbd17375..e02c00005df9b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/AbstractGeoTestCase.java @@ -32,11 +32,14 @@ package org.opensearch.search.aggregations.metrics; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.document.DocumentField; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; @@ -45,20 +48,24 @@ import org.opensearch.search.sort.SortBuilders; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import org.opensearch.test.geo.RandomGeoGenerator; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.SuiteScopeTestCase -public abstract class AbstractGeoTestCase extends OpenSearchIntegTestCase { +public abstract class AbstractGeoTestCase extends ParameterizedOpenSearchIntegTestCase { protected static final String SINGLE_VALUED_FIELD_NAME = "geo_value"; protected static final String MULTI_VALUED_FIELD_NAME = "geo_values"; @@ -69,7 +76,6 @@ public abstract class AbstractGeoTestCase extends OpenSearchIntegTestCase { protected static final String DATELINE_IDX_NAME = "dateline_idx"; protected static final String HIGH_CARD_IDX_NAME = "high_card_idx"; protected static final String IDX_ZERO_NAME = "idx_zero"; - protected static int numDocs; protected static int numUniqueGeoPoints; protected static 
GeoPoint[] singleValues, multiValues; @@ -79,6 +85,23 @@ public abstract class AbstractGeoTestCase extends OpenSearchIntegTestCase { protected static Map expectedCentroidsForGeoHash = null; protected static final double GEOHASH_TOLERANCE = 1E-5D; + public AbstractGeoTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void setupSuiteScopeCluster() throws Exception { createIndex(UNMAPPED_IDX_NAME); diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/MetricAggScriptPlugin.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/MetricAggScriptPlugin.java index e29eb95a52cd7..b4dbc8017bc3b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/MetricAggScriptPlugin.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/MetricAggScriptPlugin.java @@ -45,7 +45,7 @@ /** * Provides a number of dummy scripts for tests. - * + *
<p>
            * Each script provided allows for an {@code inc} parameter which will * be added to each value read from a document. */ diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java index 3073cb6d35ddf..de213a154c3c5 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java @@ -80,15 +80,15 @@ public class AvgBucketAggregatorTests extends AggregatorTestCase { /** * Test for issue #30608. Under the following circumstances: - * + *
<p>
            * A. Multi-bucket agg in the first entry of our internal list * B. Regular agg as the immediate child of the multi-bucket in A * C. Regular agg with the same name as B at the top level, listed as the second entry in our internal list * D. Finally, a pipeline agg with the path down to B - * + *
<p>
            * BucketMetrics reduction would throw a class cast exception due to bad subpathing. This test ensures * it is fixed. - * + *
<p>
            * Note: we have this test inside of the `avg_bucket` package so that we can get access to the package-private * `reduce()` needed for testing this */ @@ -144,6 +144,6 @@ public void testSameAggNames() throws IOException { } private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java index 17846234a09d1..5d7636208bd70 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java @@ -344,6 +344,6 @@ private void executeTestCase( } private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/pipeline/MovFnAggrgatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/pipeline/MovFnAggrgatorTests.java index 3ae00efaa6da3..d6abe50ea5201 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/pipeline/MovFnAggrgatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/pipeline/MovFnAggrgatorTests.java @@ -169,6 +169,6 @@ private void executeTestCase(Query query, DateHistogramAggregationBuilder aggBui } private static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java index 2d125b8d36542..f0d930c4c3acb 100644 --- a/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java +++ b/server/src/test/java/org/opensearch/search/backpressure/SearchBackpressureServiceTests.java @@ -32,6 +32,7 @@ import org.opensearch.tasks.TaskCancellationService; import org.opensearch.tasks.TaskManager; import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -70,7 +71,7 @@ public class SearchBackpressureServiceTests extends OpenSearchTestCase { @Before public void setup() { threadPool = new TestThreadPool(getClass().getName()); - transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool); + transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, NoopTracer.INSTANCE); transportService.start(); transportService.acceptIncomingRequests(); taskManager = transportService.getTaskManager(); diff --git 
a/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java b/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java index 5b6e668175748..b1f70dfce176c 100644 --- a/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java +++ b/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java @@ -81,7 +81,7 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.cache.bitset.BitsetFilterCache; import org.opensearch.index.shard.IndexShard; -import org.opensearch.search.SearchBootstrapSettings; +import org.opensearch.search.SearchService; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; @@ -92,6 +92,7 @@ import java.util.IdentityHashMap; import java.util.List; import java.util.Set; +import java.util.concurrent.ExecutorService; import static org.opensearch.search.internal.ContextIndexSearcher.intersectScorerAndBitSet; import static org.opensearch.search.internal.ExitableDirectoryReader.ExitableLeafReader; @@ -308,55 +309,119 @@ public void onRemoval(ShardId shardId, Accountable accountable) { public void testSlicesInternal() throws Exception { final List leaves = getLeaves(10); + try ( + final Directory directory = newDirectory(); + IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + Document document = new Document(); + document.add(new StringField("field1", "value1", Field.Store.NO)); + document.add(new StringField("field2", "value1", Field.Store.NO)); + iw.addDocument(document); + iw.commit(); + try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { + SearchContext searchContext = mock(SearchContext.class); + IndexShard indexShard = mock(IndexShard.class); + when(searchContext.indexShard()).thenReturn(indexShard); + when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR); + ContextIndexSearcher searcher = new ContextIndexSearcher( + directoryReader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + null, + searchContext + ); + // Case 1: Verify the slice count when lucene default slice computation is used + IndexSearcher.LeafSlice[] slices = searcher.slicesInternal( + leaves, + SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE + ); + int expectedSliceCount = 2; + // 2 slices will be created since max segment per slice of 5 will be reached + assertEquals(expectedSliceCount, slices.length); + for (int i = 0; i < expectedSliceCount; ++i) { + assertEquals(5, slices[i].leaves.length); + } - final Directory directory = newDirectory(); - IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE)); - Document document = new Document(); - document.add(new StringField("field1", "value1", Field.Store.NO)); - document.add(new StringField("field2", "value1", Field.Store.NO)); - iw.addDocument(document); - iw.commit(); - DirectoryReader directoryReader = DirectoryReader.open(directory); - - SearchContext searchContext = mock(SearchContext.class); - IndexShard indexShard = mock(IndexShard.class); - when(searchContext.indexShard()).thenReturn(indexShard); - 
when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR); - ContextIndexSearcher searcher = new ContextIndexSearcher( - directoryReader, - IndexSearcher.getDefaultSimilarity(), - IndexSearcher.getDefaultQueryCache(), - IndexSearcher.getDefaultQueryCachingPolicy(), - true, - null, - searchContext - ); - // Case 1: Verify the slice count when lucene default slice computation is used - IndexSearcher.LeafSlice[] slices = searcher.slicesInternal( - leaves, - SearchBootstrapSettings.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE - ); - int expectedSliceCount = 2; - // 2 slices will be created since max segment per slice of 5 will be reached - assertEquals(expectedSliceCount, slices.length); - for (int i = 0; i < expectedSliceCount; ++i) { - assertEquals(5, slices[i].leaves.length); + // Case 2: Verify the slice count when custom max slice computation is used + expectedSliceCount = 4; + slices = searcher.slicesInternal(leaves, expectedSliceCount); + + // 4 slices will be created with 3 leaves in first 2 slices and 2 leaves in other slices + assertEquals(expectedSliceCount, slices.length); + for (int i = 0; i < expectedSliceCount; ++i) { + if (i < 2) { + assertEquals(3, slices[i].leaves.length); + } else { + assertEquals(2, slices[i].leaves.length); + } + } + } } + } - // Case 2: Verify the slice count when custom max slice computation is used - expectedSliceCount = 4; - slices = searcher.slicesInternal(leaves, expectedSliceCount); - - // 4 slices will be created with 3 leaves in first 2 slices and 2 leaves in other slices - assertEquals(expectedSliceCount, slices.length); - for (int i = 0; i < expectedSliceCount; ++i) { - if (i < 2) { - assertEquals(3, slices[i].leaves.length); - } else { - assertEquals(2, slices[i].leaves.length); + public void testGetSlicesWithNonNullExecutorButCSDisabled() throws Exception { + final List leaves = getLeaves(10); + try ( + final Directory directory = newDirectory(); + IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + Document document = new Document(); + document.add(new StringField("field1", "value1", Field.Store.NO)); + document.add(new StringField("field2", "value1", Field.Store.NO)); + iw.addDocument(document); + iw.commit(); + try (DirectoryReader directoryReader = DirectoryReader.open(directory);) { + SearchContext searchContext = mock(SearchContext.class); + IndexShard indexShard = mock(IndexShard.class); + when(searchContext.indexShard()).thenReturn(indexShard); + when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR); + when(searchContext.shouldUseConcurrentSearch()).thenReturn(false); + ContextIndexSearcher searcher = new ContextIndexSearcher( + directoryReader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + null, + searchContext + ); + // Case 1: Verify getSlices return null when concurrent segment search is disabled + assertNull(searcher.getSlices()); + + // Case 2: Verify the slice count when custom max slice computation is used + searcher = new ContextIndexSearcher( + directoryReader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + mock(ExecutorService.class), + searchContext + ); + when(searchContext.shouldUseConcurrentSearch()).thenReturn(true); + 
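+                // the test relies on ContextIndexSearcher consulting the SearchContext when
+                // slices(leaves) is invoked, so the stubs for shouldUseConcurrentSearch and
+                // getTargetMaxSliceCount take effect even though the searcher was built earlier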
when(searchContext.getTargetMaxSliceCount()).thenReturn(4); + int expectedSliceCount = 4; + IndexSearcher.LeafSlice[] slices = searcher.slices(leaves); + + // 4 slices will be created with 3 leaves in first 2 slices and 2 leaves in other slices + assertEquals(expectedSliceCount, slices.length); + for (int i = 0; i < expectedSliceCount; ++i) { + if (i < 2) { + assertEquals(3, slices[i].leaves.length); + } else { + assertEquals(2, slices[i].leaves.length); + } + } } } - IOUtils.close(directoryReader, iw, directory); } private SparseFixedBitSet query(LeafReaderContext leaf, String field, String value) throws IOException { diff --git a/server/src/test/java/org/opensearch/search/internal/IndexReaderUtils.java b/server/src/test/java/org/opensearch/search/internal/IndexReaderUtils.java index a87bb8a52cdd0..16958da77f1a3 100644 --- a/server/src/test/java/org/opensearch/search/internal/IndexReaderUtils.java +++ b/server/src/test/java/org/opensearch/search/internal/IndexReaderUtils.java @@ -31,21 +31,25 @@ public class IndexReaderUtils { * @return created leaves */ public static List getLeaves(int leafCount) throws Exception { - final Directory directory = newDirectory(); - IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE)); - for (int i = 0; i < leafCount; ++i) { - Document document = new Document(); - final String fieldValue = "value" + i; - document.add(new StringField("field1", fieldValue, Field.Store.NO)); - document.add(new StringField("field2", fieldValue, Field.Store.NO)); - iw.addDocument(document); - iw.commit(); + try ( + final Directory directory = newDirectory(); + final IndexWriter iw = new IndexWriter( + directory, + new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE) + ) + ) { + for (int i = 0; i < leafCount; ++i) { + Document document = new Document(); + final String fieldValue = "value" + i; + document.add(new StringField("field1", fieldValue, Field.Store.NO)); + document.add(new StringField("field2", fieldValue, Field.Store.NO)); + iw.addDocument(document); + iw.commit(); + } + try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { + List leaves = directoryReader.leaves(); + return leaves; + } } - iw.close(); - DirectoryReader directoryReader = DirectoryReader.open(directory); - List leaves = directoryReader.leaves(); - directoryReader.close(); - directory.close(); - return leaves; } } diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineInfoTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineInfoTests.java index c19c1ebcb5c26..ce7344fa0d2d6 100644 --- a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineInfoTests.java +++ b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineInfoTests.java @@ -46,7 +46,7 @@ public void testSerializationRoundtrip() throws IOException { /** * When serializing / deserializing to / from old versions, processor type info is lost. - * + *
<p>
            * Also, we only supported request/response processors. */ public void testSerializationRoundtripBackcompat() throws IOException { diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java index d44bd3831281f..98d2a7e84d672 100644 --- a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java +++ b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java @@ -41,6 +41,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.breaker.NoopCircuitBreaker; import org.opensearch.core.common.bytes.BytesArray; @@ -194,7 +195,7 @@ public void testResolveIndexDefaultPipeline() throws Exception { service.applyClusterState(cce); SearchRequest searchRequest = new SearchRequest("my_index").source(SearchSourceBuilder.searchSource().size(5)); - PipelinedRequest pipelinedRequest = service.resolvePipeline(searchRequest); + PipelinedRequest pipelinedRequest = syncTransformRequest(service.resolvePipeline(searchRequest)); assertEquals("p1", pipelinedRequest.getPipeline().getId()); assertEquals(10, pipelinedRequest.source().size()); @@ -597,7 +598,7 @@ public void testTransformRequest() throws Exception { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(new TermQueryBuilder("foo", "bar")).size(size); SearchRequest request = new SearchRequest("_index").source(sourceBuilder).pipeline("p1"); - PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(request); + PipelinedRequest pipelinedRequest = syncTransformRequest(searchPipelineService.resolvePipeline(request)); assertEquals(2 * size, pipelinedRequest.source().size()); assertEquals(size, request.source().size()); @@ -641,19 +642,57 @@ public void testTransformResponse() throws Exception { // First try without specifying a pipeline, which should be a no-op. SearchRequest searchRequest = new SearchRequest(); PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest); - SearchResponse notTransformedResponse = pipelinedRequest.transformResponse(searchResponse); + SearchResponse notTransformedResponse = syncTransformResponse(pipelinedRequest, searchResponse); assertSame(searchResponse, notTransformedResponse); // Now apply a pipeline searchRequest = new SearchRequest().pipeline("p1"); pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest); - SearchResponse transformedResponse = pipelinedRequest.transformResponse(searchResponse); + SearchResponse transformedResponse = syncTransformResponse(pipelinedRequest, searchResponse); assertEquals(size, transformedResponse.getHits().getHits().length); for (int i = 0; i < size; i++) { assertEquals(2.0, transformedResponse.getHits().getHits()[i].getScore(), 0.0001f); } } + /** + * Helper to synchronously apply a response pipeline, returning the transformed response. 
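+     * The single-element arrays below act as mutable boxes; the fake processors used in
+     * these tests complete synchronously, so the boxes are populated before the helper returns.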
+ */ + private static SearchResponse syncTransformResponse(PipelinedRequest pipelinedRequest, SearchResponse searchResponse) throws Exception { + SearchResponse[] responseBox = new SearchResponse[1]; + Exception[] exceptionBox = new Exception[1]; + ActionListener responseListener = pipelinedRequest.transformResponseListener(ActionListener.wrap(r -> { + responseBox[0] = r; + }, e -> { exceptionBox[0] = e; })); + responseListener.onResponse(searchResponse); + + if (exceptionBox[0] != null) { + throw exceptionBox[0]; + } + return responseBox[0]; + } + + /** + * Helper to synchronously apply a request pipeline, returning the transformed request. + */ + private static PipelinedRequest syncTransformRequest(PipelinedRequest request) throws Exception { + PipelinedRequest[] requestBox = new PipelinedRequest[1]; + Exception[] exceptionBox = new Exception[1]; + + request.transformRequest(ActionListener.wrap(r -> requestBox[0] = (PipelinedRequest) r, e -> exceptionBox[0] = e)); + if (exceptionBox[0] != null) { + throw exceptionBox[0]; + } + return requestBox[0]; + } + + /** + * Helper to synchronously apply a request pipeline and response pipeline, returning the transformed response. + */ + private static SearchResponse syncExecutePipeline(PipelinedRequest request, SearchResponse response) throws Exception { + return syncTransformResponse(syncTransformRequest(request), response); + } + public void testTransformSearchPhase() { SearchPipelineService searchPipelineService = createWithProcessors(); SearchPipelineMetadata metadata = new SearchPipelineMetadata( @@ -875,7 +914,7 @@ public void testInlinePipeline() throws Exception { SearchRequest searchRequest = new SearchRequest().source(sourceBuilder); // Verify pipeline - PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest); + PipelinedRequest pipelinedRequest = syncTransformRequest(searchPipelineService.resolvePipeline(searchRequest)); Pipeline pipeline = pipelinedRequest.getPipeline(); assertEquals(SearchPipelineService.AD_HOC_PIPELINE_ID, pipeline.getId()); assertEquals(1, pipeline.getSearchRequestProcessors().size()); @@ -894,7 +933,7 @@ public void testInlinePipeline() throws Exception { SearchResponseSections searchResponseSections = new SearchResponseSections(searchHits, null, null, false, false, null, 0); SearchResponse searchResponse = new SearchResponse(searchResponseSections, null, 1, 1, 0, 10, null, null); - SearchResponse transformedResponse = pipeline.transformResponse(searchRequest, searchResponse); + SearchResponse transformedResponse = syncTransformResponse(pipelinedRequest, searchResponse); for (int i = 0; i < size; i++) { assertEquals(2.0, transformedResponse.getHits().getHits()[i].getScore(), 0.0001); } @@ -946,7 +985,10 @@ public void testExceptionOnRequestProcessing() { SearchRequest searchRequest = new SearchRequest().source(sourceBuilder); // Exception thrown when processing the request - expectThrows(SearchPipelineProcessingException.class, () -> searchPipelineService.resolvePipeline(searchRequest)); + expectThrows( + SearchPipelineProcessingException.class, + () -> syncTransformRequest(searchPipelineService.resolvePipeline(searchRequest)) + ); } public void testExceptionOnResponseProcessing() throws Exception { @@ -974,10 +1016,10 @@ public void testExceptionOnResponseProcessing() throws Exception { SearchResponse response = new SearchResponse(null, null, 0, 0, 0, 0, null, null); // Exception thrown when processing response - expectThrows(SearchPipelineProcessingException.class, () -> 
pipelinedRequest.transformResponse(response)); + expectThrows(SearchPipelineProcessingException.class, () -> syncTransformResponse(pipelinedRequest, response)); } - public void testCatchExceptionOnRequestProcessing() throws IllegalAccessException { + public void testCatchExceptionOnRequestProcessing() throws Exception { SearchRequestProcessor throwingRequestProcessor = new FakeRequestProcessor("throwing_request", null, null, true, r -> { throw new RuntimeException(); }); @@ -1008,7 +1050,7 @@ public void testCatchExceptionOnRequestProcessing() throws IllegalAccessExceptio "The exception from request processor [throwing_request] in the search pipeline [_ad_hoc_pipeline] was ignored" ) ); - PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest); + syncTransformRequest(searchPipelineService.resolvePipeline(searchRequest)); mockAppender.assertAllExpectationsMatched(); } } @@ -1048,7 +1090,7 @@ public void testCatchExceptionOnResponseProcessing() throws Exception { "The exception from response processor [throwing_response] in the search pipeline [_ad_hoc_pipeline] was ignored" ) ); - pipelinedRequest.transformResponse(response); + syncTransformResponse(pipelinedRequest, response); mockAppender.assertAllExpectationsMatched(); } } @@ -1078,15 +1120,15 @@ public void testStats() throws Exception { SearchRequest request = new SearchRequest(); SearchResponse response = new SearchResponse(null, null, 0, 0, 0, 0, null, null); - searchPipelineService.resolvePipeline(request.pipeline("good_request_pipeline")).transformResponse(response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("good_request_pipeline")), response); expectThrows( SearchPipelineProcessingException.class, - () -> searchPipelineService.resolvePipeline(request.pipeline("bad_request_pipeline")).transformResponse(response) + () -> syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("bad_request_pipeline")), response) ); - searchPipelineService.resolvePipeline(request.pipeline("good_response_pipeline")).transformResponse(response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("good_response_pipeline")), response); expectThrows( SearchPipelineProcessingException.class, - () -> searchPipelineService.resolvePipeline(request.pipeline("bad_response_pipeline")).transformResponse(response) + () -> syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("bad_response_pipeline")), response) ); SearchPipelineStats stats = searchPipelineService.stats(); @@ -1164,12 +1206,12 @@ public void testStatsEnabledIgnoreFailure() throws Exception { SearchRequest request = new SearchRequest(); SearchResponse response = new SearchResponse(null, null, 0, 0, 0, 0, null, null); - searchPipelineService.resolvePipeline(request.pipeline("good_request_pipeline")).transformResponse(response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("good_request_pipeline")), response); // Caught Exception here - searchPipelineService.resolvePipeline(request.pipeline("bad_request_pipeline")).transformResponse(response); - searchPipelineService.resolvePipeline(request.pipeline("good_response_pipeline")).transformResponse(response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("bad_request_pipeline")), response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("good_response_pipeline")), response); // Caught Exception here - 
searchPipelineService.resolvePipeline(request.pipeline("bad_response_pipeline")).transformResponse(response); + syncExecutePipeline(searchPipelineService.resolvePipeline(request.pipeline("bad_response_pipeline")), response); // when ignoreFailure enabled, the search pipelines will all succeed. SearchPipelineStats stats = searchPipelineService.stats(); @@ -1273,8 +1315,8 @@ private SearchPipelineService getSearchPipelineService( } private static void assertPipelineStats(OperationStats stats, long count, long failed) { - assertEquals(stats.getCount(), count); - assertEquals(stats.getFailedCount(), failed); + assertEquals(count, stats.getCount()); + assertEquals(failed, stats.getFailedCount()); } public void testAdHocRejectingProcessor() { diff --git a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java new file mode 100644 index 0000000000000..db14eb90ef839 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfileBreakdownTests.java @@ -0,0 +1,432 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.search.profile.query; + +import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.Collector; +import org.apache.lucene.store.Directory; +import org.opensearch.search.profile.AbstractProfileBreakdown; +import org.opensearch.search.profile.Timer; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.search.profile.AbstractProfileBreakdown.TIMING_TYPE_COUNT_SUFFIX; +import static org.opensearch.search.profile.AbstractProfileBreakdown.TIMING_TYPE_START_TIME_SUFFIX; +import static org.opensearch.search.profile.query.ConcurrentQueryProfileBreakdown.MIN_PREFIX; +import static org.opensearch.search.profile.query.ConcurrentQueryProfileBreakdown.SLICE_END_TIME_SUFFIX; +import static org.opensearch.search.profile.query.ConcurrentQueryProfileBreakdown.SLICE_START_TIME_SUFFIX; +import static org.mockito.Mockito.mock; + +public class ConcurrentQueryProfileBreakdownTests extends OpenSearchTestCase { + private ConcurrentQueryProfileBreakdown testQueryProfileBreakdown; + private Timer createWeightTimer; + + @Before + public void setup() { + testQueryProfileBreakdown = new ConcurrentQueryProfileBreakdown(); + createWeightTimer = testQueryProfileBreakdown.getTimer(QueryTimingType.CREATE_WEIGHT); + try { + createWeightTimer.start(); + Thread.sleep(10); + } catch (InterruptedException ex) { + // ignore + } finally { + createWeightTimer.stop(); + } + } + + public void testBreakdownMapWithNoLeafContext() throws Exception { + final Map queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap(); + 
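+        // a rough tally of the 66 entries, assuming QueryTimingType has nine values:
+        // create_weight keeps only its time and count (2 entries), while each of the
+        // remaining eight timing types adds time, count, and max/min/avg for both
+        // (8 entries apiece), giving 2 + 8 * 8 = 66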
assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty()); + assertEquals(66, queryBreakDownMap.size()); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + String timingTypeKey = queryTimingType.toString(); + String timingTypeCountKey = queryTimingType + TIMING_TYPE_COUNT_SUFFIX; + + if (queryTimingType.equals(QueryTimingType.CREATE_WEIGHT)) { + final long createWeightTime = queryBreakDownMap.get(timingTypeKey); + assertTrue(createWeightTime > 0); + assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for weight type stats + assertFalse( + queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + // verify total/min/max/avg node time is same as weight time + assertEquals(createWeightTime, testQueryProfileBreakdown.toNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getAvgSliceNodeTime()); + continue; + } + assertEquals(0, (long) queryBreakDownMap.get(timingTypeKey)); + assertEquals(0, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey)); + assertEquals(0, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey)); + assertEquals(0, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeKey)); + assertEquals(0, (long) queryBreakDownMap.get(timingTypeCountKey)); + assertEquals(0, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey)); + assertEquals(0, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeCountKey)); + assertEquals(0, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey)); + } + } + + public void testBuildSliceLevelBreakdownWithSingleSlice() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(1); + final Directory directory = directoryReader.directory(); + final LeafReaderContext sliceLeaf = directoryReader.leaves().get(0); + final Collector sliceCollector = mock(Collector.class); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final AbstractProfileBreakdown leafProfileBreakdown = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap + ); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector, sliceLeaf); + testQueryProfileBreakdown.getContexts().put(sliceLeaf, leafProfileBreakdown); + final Map> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown(); + assertFalse(sliceBreakdownMap == null || sliceBreakdownMap.isEmpty()); + assertEquals(1, sliceBreakdownMap.size()); + assertTrue(sliceBreakdownMap.containsKey(sliceCollector)); + + final Map sliceBreakdown = sliceBreakdownMap.entrySet().iterator().next().getValue(); + for 
(QueryTimingType timingType : QueryTimingType.values()) { + String timingTypeKey = timingType.toString(); + String timingTypeCountKey = timingTypeKey + TIMING_TYPE_COUNT_SUFFIX; + + if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { + // there should be no entry for create weight at slice level breakdown map + assertNull(sliceBreakdown.get(timingTypeKey)); + assertNull(sliceBreakdown.get(timingTypeCountKey)); + continue; + } + + // for other timing type we will have all the value and will be same as leaf breakdown as there is single slice and single leaf + assertEquals(leafProfileBreakdownMap.get(timingTypeKey), sliceBreakdown.get(timingTypeKey)); + assertEquals(leafProfileBreakdownMap.get(timingTypeCountKey), sliceBreakdown.get(timingTypeCountKey)); + assertEquals( + leafProfileBreakdownMap.get(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX), + sliceBreakdown.get(timingTypeKey + SLICE_START_TIME_SUFFIX) + ); + assertEquals( + leafProfileBreakdownMap.get(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX) + leafProfileBreakdownMap.get(timingTypeKey), + (long) sliceBreakdown.get(timingTypeKey + SLICE_END_TIME_SUFFIX) + ); + } + assertEquals(10, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getAvgSliceNodeTime()); + directoryReader.close(); + directory.close(); + } + + public void testBuildSliceLevelBreakdownWithMultipleSlices() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(2); + final Directory directory = directoryReader.directory(); + final Collector sliceCollector_1 = mock(Collector.class); + final Collector sliceCollector_2 = mock(Collector.class); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final Map leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEndTime + 40, 10, 1); + final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_1 + ); + final AbstractProfileBreakdown leafProfileBreakdown_2 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_2 + ); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_1, directoryReader.leaves().get(0)); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1)); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(1), leafProfileBreakdown_2); + final Map> sliceBreakdownMap = testQueryProfileBreakdown.buildSliceLevelBreakdown(); + assertFalse(sliceBreakdownMap == null || sliceBreakdownMap.isEmpty()); + assertEquals(2, sliceBreakdownMap.size()); + + for (Map.Entry> sliceBreakdowns : sliceBreakdownMap.entrySet()) { + Map sliceBreakdown = sliceBreakdowns.getValue(); + Map leafProfileBreakdownMap; + if (sliceBreakdowns.getKey().equals(sliceCollector_1)) { + leafProfileBreakdownMap = leafProfileBreakdownMap_1; + } else { + leafProfileBreakdownMap = leafProfileBreakdownMap_2; + } + for (QueryTimingType timingType : QueryTimingType.values()) { + String timingTypeKey = timingType.toString(); + String timingTypeCountKey = timingTypeKey + 
TIMING_TYPE_COUNT_SUFFIX; + + if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { + // there should be no entry for create weight at slice level breakdown map + assertNull(sliceBreakdown.get(timingTypeKey)); + assertNull(sliceBreakdown.get(timingTypeCountKey)); + continue; + } + + // for other timing type we will have all the value and will be same as leaf breakdown as there is single slice and single + // leaf + assertEquals(leafProfileBreakdownMap.get(timingTypeKey), sliceBreakdown.get(timingTypeKey)); + assertEquals(leafProfileBreakdownMap.get(timingTypeCountKey), sliceBreakdown.get(timingTypeCountKey)); + assertEquals( + leafProfileBreakdownMap.get(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX), + sliceBreakdown.get(timingTypeKey + SLICE_START_TIME_SUFFIX) + ); + assertEquals( + leafProfileBreakdownMap.get(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX) + leafProfileBreakdownMap.get(timingTypeKey), + (long) sliceBreakdown.get(timingTypeKey + SLICE_END_TIME_SUFFIX) + ); + } + } + + assertEquals(10, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getAvgSliceNodeTime()); + directoryReader.close(); + directory.close(); + } + + public void testBreakDownMapWithMultipleSlices() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(2); + final Directory directory = directoryReader.directory(); + final Collector sliceCollector_1 = mock(Collector.class); + final Collector sliceCollector_2 = mock(Collector.class); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final Map leafProfileBreakdownMap_2 = getLeafBreakdownMap(createWeightEndTime + 40, 20, 1); + final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_1 + ); + final AbstractProfileBreakdown leafProfileBreakdown_2 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_2 + ); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_1, directoryReader.leaves().get(0)); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1)); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(1), leafProfileBreakdown_2); + + Map queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap(); + assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty()); + assertEquals(66, queryBreakDownMap.size()); + + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + String timingTypeKey = queryTimingType.toString(); + String timingTypeCountKey = queryTimingType + TIMING_TYPE_COUNT_SUFFIX; + + if (queryTimingType.equals(QueryTimingType.CREATE_WEIGHT)) { + final long createWeightTime = queryBreakDownMap.get(timingTypeKey); + assertEquals(createWeightTimer.getApproximateTiming(), createWeightTime); + assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for weight type stats + assertFalse( + queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || 
queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + continue; + } + assertEquals(50, (long) queryBreakDownMap.get(timingTypeKey)); + assertEquals(20, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey)); + assertEquals(15, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey)); + assertEquals(10, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeKey)); + assertEquals(2, (long) queryBreakDownMap.get(timingTypeCountKey)); + assertEquals(1, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey)); + assertEquals(1, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeCountKey)); + assertEquals(1, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey)); + } + + assertEquals(20, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(10, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(15, testQueryProfileBreakdown.getAvgSliceNodeTime()); + directoryReader.close(); + directory.close(); + } + + public void testBreakDownMapWithMultipleSlicesAndOneSliceWithNoLeafContext() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(2); + final Directory directory = directoryReader.directory(); + final Collector sliceCollector_1 = mock(Collector.class); + final Collector sliceCollector_2 = mock(Collector.class); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_1 + ); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_1, directoryReader.leaves().get(0)); + testQueryProfileBreakdown.associateCollectorToLeaves(sliceCollector_2, directoryReader.leaves().get(1)); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); + // leaf2 profile breakdown is not present in contexts map + + Map queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap(); + assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty()); + assertEquals(66, queryBreakDownMap.size()); + + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + String timingTypeKey = queryTimingType.toString(); + String timingTypeCountKey = queryTimingType + TIMING_TYPE_COUNT_SUFFIX; + + if (queryTimingType.equals(QueryTimingType.CREATE_WEIGHT)) { + final long createWeightTime = queryBreakDownMap.get(timingTypeKey); + assertEquals(createWeightTimer.getApproximateTiming(), createWeightTime); + assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for weight type stats + assertFalse( + queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || 
queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + continue; + } + assertEquals(10, (long) queryBreakDownMap.get(timingTypeKey)); + assertEquals(10, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey)); + assertEquals(5, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey)); + assertEquals(0, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeKey)); + assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey)); + assertEquals(1, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey)); + // a min of 0 means one of the slices didn't work on any leaf context + assertEquals(0, (long) queryBreakDownMap.get(MIN_PREFIX + timingTypeCountKey)); + assertEquals(0, (long) queryBreakDownMap.get(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey)); + } + + assertEquals(10, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(5, testQueryProfileBreakdown.getAvgSliceNodeTime()); + directoryReader.close(); + directory.close(); + } + + public void testOneLeafContextWithEmptySliceCollectorsToLeaves() throws Exception { + final DirectoryReader directoryReader = getDirectoryReader(1); + final Directory directory = directoryReader.directory(); + final long createWeightEarliestStartTime = createWeightTimer.getEarliestTimerStartTime(); + final long createWeightEndTime = createWeightEarliestStartTime + createWeightTimer.getApproximateTiming(); + final Map leafProfileBreakdownMap_1 = getLeafBreakdownMap(createWeightEndTime + 10, 10, 1); + final AbstractProfileBreakdown leafProfileBreakdown_1 = new TestQueryProfileBreakdown( + QueryTimingType.class, + leafProfileBreakdownMap_1 + ); + testQueryProfileBreakdown.getContexts().put(directoryReader.leaves().get(0), leafProfileBreakdown_1); + final Map queryBreakDownMap = testQueryProfileBreakdown.toBreakdownMap(); + assertFalse(queryBreakDownMap == null || queryBreakDownMap.isEmpty()); + assertEquals(26, queryBreakDownMap.size()); + for (QueryTimingType queryTimingType : QueryTimingType.values()) { + String timingTypeKey = queryTimingType.toString(); + String timingTypeCountKey = queryTimingType + TIMING_TYPE_COUNT_SUFFIX; + + if (queryTimingType.equals(QueryTimingType.CREATE_WEIGHT)) { + final long createWeightTime = queryBreakDownMap.get(timingTypeKey); + assertEquals(createWeightTimer.getApproximateTiming(), createWeightTime); + assertEquals(1, (long) queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for weight type stats + assertFalse( + queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + continue; + } + assertNotNull(queryBreakDownMap.get(timingTypeKey)); +
assertNotNull(queryBreakDownMap.get(timingTypeCountKey)); + // verify there is no min/max/avg for current breakdown type stats + assertFalse( + queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.MAX_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(MIN_PREFIX + timingTypeCountKey) + || queryBreakDownMap.containsKey(ConcurrentQueryProfileBreakdown.AVG_PREFIX + timingTypeCountKey) + ); + } + assertEquals(0, testQueryProfileBreakdown.getMaxSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getMinSliceNodeTime()); + assertEquals(0, testQueryProfileBreakdown.getAvgSliceNodeTime()); + directoryReader.close(); + directory.close(); + } + + private Map getLeafBreakdownMap(long startTime, long timeTaken, long count) { + Map leafBreakDownMap = new HashMap<>(); + for (QueryTimingType timingType : QueryTimingType.values()) { + if (timingType.equals(QueryTimingType.CREATE_WEIGHT)) { + // don't add anything + continue; + } + String timingTypeKey = timingType.toString(); + leafBreakDownMap.put(timingTypeKey, timeTaken); + leafBreakDownMap.put(timingTypeKey + TIMING_TYPE_COUNT_SUFFIX, count); + leafBreakDownMap.put(timingTypeKey + TIMING_TYPE_START_TIME_SUFFIX, startTime); + } + return leafBreakDownMap; + } + + private DirectoryReader getDirectoryReader(int numLeaves) throws Exception { + final Directory directory = newDirectory(); + IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE)); + + for (int i = 0; i < numLeaves; ++i) { + Document document = new Document(); + document.add(new StringField("field1", "value" + i, Field.Store.NO)); + document.add(new StringField("field2", "value" + i, Field.Store.NO)); + iw.addDocument(document); + iw.commit(); + } + iw.deleteDocuments(new Term("field1", "value3")); + iw.close(); + return DirectoryReader.open(directory); + } + + private static class TestQueryProfileBreakdown extends AbstractProfileBreakdown { + private Map breakdownMap; + + public TestQueryProfileBreakdown(Class clazz, Map breakdownMap) { + super(clazz); + this.breakdownMap = breakdownMap; + } + + @Override + public Map toBreakdownMap() { + return breakdownMap; + } + } +} diff --git a/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java new file mode 100644 index 0000000000000..736bbcdd9e8dd --- /dev/null +++ b/server/src/test/java/org/opensearch/search/profile/query/ConcurrentQueryProfilerTests.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.profile.query; + +import org.opensearch.search.profile.Timer; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.LinkedList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class ConcurrentQueryProfilerTests extends OpenSearchTestCase { + + public void testMergeRewriteTimeIntervals() { + ConcurrentQueryProfiler profiler = new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()); + List timers = new LinkedList<>(); + timers.add(new Timer(217134L, 1L, 1L, 0L, 553074511206907L)); + timers.add(new Timer(228954L, 1L, 1L, 0L, 553074509287335L)); + timers.add(new Timer(228954L, 1L, 1L, 0L, 553074509287336L)); + LinkedList mergedIntervals = profiler.mergeRewriteTimeIntervals(timers); + assertThat(mergedIntervals.size(), equalTo(2)); + long[] interval = mergedIntervals.get(0); + assertThat(interval[0], equalTo(553074509287335L)); + assertThat(interval[1], equalTo(553074509516290L)); + interval = mergedIntervals.get(1); + assertThat(interval[0], equalTo(553074511206907L)); + assertThat(interval[1], equalTo(553074511424041L)); + } +} diff --git a/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java b/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java index a278e7004cc93..481a224f2ff0e 100644 --- a/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java +++ b/server/src/test/java/org/opensearch/search/profile/query/QueryProfilerTests.java @@ -81,6 +81,9 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -89,6 +92,10 @@ public class QueryProfilerTests extends OpenSearchTestCase { private IndexReader reader; private ContextIndexSearcher searcher; private ExecutorService executor; + private static final String MAX_PREFIX = "max_"; + private static final String MIN_PREFIX = "min_"; + private static final String AVG_PREFIX = "avg_"; + private static final String TIMING_TYPE_COUNT_SUFFIX = "_count"; @ParametersFactory public static Collection concurrency() { @@ -154,13 +161,16 @@ public void tearDown() throws Exception { } public void testBasic() throws IOException { - QueryProfiler profiler = new QueryProfiler(executor != null); + QueryProfiler profiler = executor != null + ? 
new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); Query query = new TermQuery(new Term("foo", "bar")); searcher.search(query, 1); List results = profiler.getTree(); assertEquals(1, results.size()); - Map breakdown = results.get(0).getTimeBreakdown(); + ProfileResult profileResult = results.get(0); + Map breakdown = profileResult.getTimeBreakdown(); assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString()), greaterThan(0L)); @@ -168,25 +178,68 @@ public void testBasic() throws IOException { assertThat(breakdown.get(QueryTimingType.SCORE.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.MATCH.toString()), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.ADVANCE.toString() + "_count"), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.SCORE.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.MATCH.toString() + "_count"), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + + if (executor != null) { + assertThat(profileResult.getMaxSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getMinSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getAvgSliceTime(), is(not(nullValue()))); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.MATCH), equalTo(0L)); + 
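// the per-slice count stats below mirror the timing stats above: max_/min_/avg_ entries are also emitted for each timing type's _count key +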
assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + } else { + assertThat(profileResult.getMaxSliceTime(), is(nullValue())); + assertThat(profileResult.getMinSliceTime(), is(nullValue())); + assertThat(profileResult.getAvgSliceTime(), is(nullValue())); + } long rewriteTime = profiler.getRewriteTime(); assertThat(rewriteTime, greaterThan(0L)); } public void testNoScoring() throws IOException { - QueryProfiler profiler = new QueryProfiler(executor != null); + QueryProfiler profiler = executor != null + ? 
new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); Query query = new TermQuery(new Term("foo", "bar")); searcher.search(query, 1, Sort.INDEXORDER); // scores are not needed List results = profiler.getTree(); assertEquals(1, results.size()); - Map breakdown = results.get(0).getTimeBreakdown(); + ProfileResult profileResult = results.get(0); + Map breakdown = profileResult.getTimeBreakdown(); assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString()), greaterThan(0L)); @@ -194,19 +247,61 @@ public void testNoScoring() throws IOException { assertThat(breakdown.get(QueryTimingType.SCORE.toString()), equalTo(0L)); assertThat(breakdown.get(QueryTimingType.MATCH.toString()), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.ADVANCE.toString() + "_count"), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.SCORE.toString() + "_count"), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.MATCH.toString() + "_count"), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + + if (executor != null) { + assertThat(profileResult.getMaxSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getMinSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getAvgSliceTime(), is(not(nullValue()))); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.MATCH), 
equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + } else { + assertThat(profileResult.getMaxSliceTime(), is(nullValue())); + assertThat(profileResult.getMinSliceTime(), is(nullValue())); + assertThat(profileResult.getAvgSliceTime(), is(nullValue())); + } long rewriteTime = profiler.getRewriteTime(); assertThat(rewriteTime, greaterThan(0L)); } public void testUseIndexStats() throws IOException { - QueryProfiler profiler = new QueryProfiler(executor != null); + QueryProfiler profiler = executor != null + ? new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); Query query = new TermQuery(new Term("foo", "bar")); searcher.count(query); // will use index stats @@ -220,13 +315,16 @@ public void testUseIndexStats() throws IOException { } public void testApproximations() throws IOException { - QueryProfiler profiler = new QueryProfiler(executor != null); + QueryProfiler profiler = executor != null + ? 
new ConcurrentQueryProfiler(new ConcurrentQueryProfileTree()) + : new QueryProfiler(new InternalQueryProfileTree()); searcher.setProfiler(profiler); Query query = new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random()); searcher.count(query); List results = profiler.getTree(); assertEquals(1, results.size()); - Map breakdown = results.get(0).getTimeBreakdown(); + ProfileResult profileResult = results.get(0); + Map breakdown = profileResult.getTimeBreakdown(); assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString()), greaterThan(0L)); assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString()), greaterThan(0L)); @@ -234,12 +332,52 @@ public void testApproximations() throws IOException { assertThat(breakdown.get(QueryTimingType.SCORE.toString()), equalTo(0L)); assertThat(breakdown.get(QueryTimingType.MATCH.toString()), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.BUILD_SCORER.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.NEXT_DOC.toString() + "_count"), greaterThan(0L)); - assertThat(breakdown.get(QueryTimingType.ADVANCE.toString() + "_count"), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.SCORE.toString() + "_count"), equalTo(0L)); - assertThat(breakdown.get(QueryTimingType.MATCH.toString() + "_count"), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.CREATE_WEIGHT + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + + if (executor != null) { + assertThat(profileResult.getMaxSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getMinSliceTime(), is(not(nullValue()))); + assertThat(profileResult.getAvgSliceTime(), is(not(nullValue()))); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + 
QueryTimingType.MATCH), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.BUILD_SCORER + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.NEXT_DOC + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.ADVANCE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.SCORE + TIMING_TYPE_COUNT_SUFFIX), equalTo(0L)); + assertThat(breakdown.get(MAX_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(MIN_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + assertThat(breakdown.get(AVG_PREFIX + QueryTimingType.MATCH + TIMING_TYPE_COUNT_SUFFIX), greaterThan(0L)); + } else { + assertThat(profileResult.getMaxSliceTime(), is(nullValue())); + assertThat(profileResult.getMinSliceTime(), is(nullValue())); + assertThat(profileResult.getAvgSliceTime(), is(nullValue())); + } long rewriteTime = profiler.getRewriteTime(); assertThat(rewriteTime, greaterThan(0L)); diff --git a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java index 7d350847b50e5..39126a607f968 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -1209,6 +1209,12 @@ private static ContextIndexSearcher newContextSearcher(IndexReader reader, Execu IndexShard indexShard = mock(IndexShard.class); when(searchContext.indexShard()).thenReturn(indexShard); when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR); + when(searchContext.shouldUseConcurrentSearch()).thenReturn(executor != null); + if (executor != null) { + when(searchContext.getTargetMaxSliceCount()).thenReturn(randomIntBetween(0, 2)); + } else { + when(searchContext.getTargetMaxSliceCount()).thenThrow(IllegalStateException.class); + } return new ContextIndexSearcher( reader, IndexSearcher.getDefaultSimilarity(), @@ -1226,6 +1232,12 @@ private static ContextIndexSearcher newEarlyTerminationContextSearcher(IndexRead IndexShard indexShard = mock(IndexShard.class); when(searchContext.indexShard()).thenReturn(indexShard); when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR); + when(searchContext.shouldUseConcurrentSearch()).thenReturn(executor != null); + if (executor != null) { + when(searchContext.getTargetMaxSliceCount()).thenReturn(randomIntBetween(0, 2)); + } else { + 
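// the sequential (non-concurrent) path must never consult the slice count, so the stub is made to throw if it is ever called +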
when(searchContext.getTargetMaxSliceCount()).thenThrow(IllegalStateException.class); + } return new ContextIndexSearcher( reader, IndexSearcher.getDefaultSimilarity(), diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java index 739ba6fc15c76..28af8a63cfba8 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java @@ -338,6 +338,14 @@ public void testMinScoreDisablesCountOptimization() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThanOrEqualTo(100L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(1L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThanOrEqualTo(100L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(100L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThanOrEqualTo(100L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), equalTo(1L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), equalTo(1L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), equalTo(1L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); }, collector -> { @@ -477,6 +485,14 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); }, collector -> { @@ -547,6 +563,14 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); }, collector -> { @@ -585,6 +609,14 @@ public void testTerminateAfterEarlyTermination() throws Exception { 
assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); @@ -709,6 +741,14 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), greaterThan(0L)); + if (executor != null) { + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("max_score_count"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("min_score_count"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + } assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); @@ -716,6 +756,14 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score_count"), greaterThan(0L)); + if (executor != null) { + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("max_score_count"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("min_score_count"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + } }, collector -> { assertThat(collector.getReason(), equalTo("search_terminate_after_count")); assertThat(collector.getTime(), greaterThan(0L)); @@ -1054,6 +1102,14 @@ public void testDisableTopScoreCollection() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), 
greaterThan(0L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); }, collector -> { @@ -1133,6 +1189,14 @@ public void testMinScore() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), equalTo(10L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), equalTo(10L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), equalTo(10L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), equalTo(10L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); @@ -1210,6 +1274,14 @@ public void testMaxScore() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThanOrEqualTo(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(4L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(1L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); @@ -1245,6 +1317,14 @@ public void testMaxScore() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(0L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(1L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); @@ -1315,6 +1395,14 @@ public 
void testCollapseQuerySearchResults() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(2L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(6L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); assertThat(query.getProfiledChildren(), empty()); @@ -1342,6 +1430,14 @@ public void testCollapseQuerySearchResults() throws Exception { assertThat(query.getTimeBreakdown().keySet(), not(empty())); assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); + if (executor != null) { + assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); + assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); + assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(2L)); + assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(6L)); + } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); assertThat(query.getProfiledChildren(), empty()); diff --git a/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java b/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java index 93be194b2d112..c5f36fcc01983 100644 --- a/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java +++ b/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java @@ -34,14 +34,20 @@ import org.opensearch.OpenSearchCorruptionException; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.fs.FsBlobContainer; import org.opensearch.common.blobstore.fs.FsBlobStore; +import org.opensearch.common.blobstore.stream.read.ReadContext; +import org.opensearch.common.blobstore.stream.write.WriteContext; +import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.compress.DeflateCompressor; import org.opensearch.common.io.Streams; import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.compress.CompressorRegistry; @@ -56,10 +62,17 @@ import java.io.EOFException; import java.io.IOException; import 
java.io.InputStream; +import java.nio.file.Path; +import java.util.Map; +import java.util.concurrent.CountDownLatch; + +import org.mockito.ArgumentCaptor; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThan; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; public class BlobStoreFormatTests extends OpenSearchTestCase { @@ -114,6 +127,84 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par } } + public void testBlobStoreAsyncOperations() throws IOException, InterruptedException { + BlobStore blobStore = createTestBlobStore(); + MockFsVerifyingBlobContainer mockBlobContainer = new MockFsVerifyingBlobContainer( + (FsBlobStore) blobStore, + BlobPath.cleanPath(), + null + ); + MockFsVerifyingBlobContainer spyContainer = spy(mockBlobContainer); + ChecksumBlobStoreFormat checksumSMILE = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent); + ArgumentCaptor> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + ArgumentCaptor writeContextArgumentCaptor = ArgumentCaptor.forClass(WriteContext.class); + CountDownLatch latch = new CountDownLatch(2); + + // Write blobs in different formats + checksumSMILE.writeAsync( + new BlobObj("checksum smile"), + spyContainer, + "check-smile", + CompressorRegistry.none(), + getVoidActionListener(latch), + ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS + ); + checksumSMILE.writeAsync( + new BlobObj("checksum smile compressed"), + spyContainer, + "check-smile-comp", + CompressorRegistry.getCompressor(DeflateCompressor.NAME), + getVoidActionListener(latch), + ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS + ); + + latch.await(); + + verify(spyContainer, times(2)).asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); + assertEquals(2, writeContextArgumentCaptor.getAllValues().size()); + writeContextArgumentCaptor.getAllValues() + .forEach(writeContext -> assertEquals(WritePriority.NORMAL, writeContext.getWritePriority())); + // Assert that all checksum blobs can be read + assertEquals(checksumSMILE.read(mockBlobContainer.getDelegate(), "check-smile", xContentRegistry()).getText(), "checksum smile"); + assertEquals( + checksumSMILE.read(mockBlobContainer.getDelegate(), "check-smile-comp", xContentRegistry()).getText(), + "checksum smile compressed" + ); + } + + public void testBlobStorePriorityAsyncOperation() throws IOException, InterruptedException { + BlobStore blobStore = createTestBlobStore(); + MockFsVerifyingBlobContainer mockBlobContainer = new MockFsVerifyingBlobContainer( + (FsBlobStore) blobStore, + BlobPath.cleanPath(), + null + ); + MockFsVerifyingBlobContainer spyContainer = spy(mockBlobContainer); + ChecksumBlobStoreFormat checksumSMILE = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent); + + ArgumentCaptor> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); + ArgumentCaptor writeContextArgumentCaptor = ArgumentCaptor.forClass(WriteContext.class); + CountDownLatch latch = new CountDownLatch(1); + + // Write a single blob with urgent priority + checksumSMILE.writeAsyncWithUrgentPriority( + new BlobObj("cluster state diff"), + spyContainer, + "cluster-state-diff", + CompressorRegistry.none(), + getVoidActionListener(latch), + ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS + ); + latch.await(); + +
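// the single urgent-priority write should result in exactly one asyncBlobUpload call, tagged with WritePriority.URGENT +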
verify(spyContainer).asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); + assertEquals(WritePriority.URGENT, writeContextArgumentCaptor.getValue().getWritePriority()); + assertEquals( + checksumSMILE.read(mockBlobContainer.getDelegate(), "cluster-state-diff", xContentRegistry()).getText(), + "cluster state diff" + ); + } + public void testBlobStoreOperations() throws IOException { BlobStore blobStore = createTestBlobStore(); BlobContainer blobContainer = blobStore.blobContainer(BlobPath.cleanPath()); @@ -168,6 +259,24 @@ public void testBlobCorruption() throws IOException { } } + private ActionListener getVoidActionListener(CountDownLatch latch) { + ActionListener actionListener = new ActionListener<>() { + @Override + public void onResponse(Void unused) { + logger.info("---> Async write succeeded"); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + logger.info("---> Failure in async write"); + throw new RuntimeException("async write should not fail"); + } + }; + + return actionListener; + } + protected BlobStore createTestBlobStore() throws IOException { return new FsBlobStore(randomIntBetween(1, 8) * 1024, createTempDir(), false); } @@ -196,4 +305,35 @@ private long checksum(byte[] buffer) throws IOException { } } } + + public static class MockFsVerifyingBlobContainer extends FsBlobContainer implements AsyncMultiStreamBlobContainer { + + private BlobContainer delegate; + + public MockFsVerifyingBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path) { + super(blobStore, blobPath, path); + delegate = blobStore.blobContainer(BlobPath.cleanPath()); + } + + @Override + public void asyncBlobUpload(WriteContext writeContext, ActionListener completionListener) throws IOException { + InputStream inputStream = writeContext.getStreamProvider(Integer.MAX_VALUE).provideStream(0).getInputStream(); + delegate.writeBlob(writeContext.getFileName(), inputStream, writeContext.getFileSize(), true); + completionListener.onResponse(null); + } + + @Override + public void readBlobAsync(String blobName, ActionListener listener) { + throw new RuntimeException("read not supported"); + } + + @Override + public boolean remoteIntegrityCheckSupported() { + return false; + } + + public BlobContainer getDelegate() { + return delegate; + } + } } diff --git a/server/src/test/java/org/opensearch/snapshots/RepositoriesMetadataSerializationTests.java b/server/src/test/java/org/opensearch/snapshots/RepositoriesMetadataSerializationTests.java index e47f6d87a95e5..8550316a666e8 100644 --- a/server/src/test/java/org/opensearch/snapshots/RepositoriesMetadataSerializationTests.java +++ b/server/src/test/java/org/opensearch/snapshots/RepositoriesMetadataSerializationTests.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.Diff; +import org.opensearch.cluster.metadata.CryptoMetadata; import org.opensearch.cluster.metadata.Metadata.Custom; import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; @@ -57,13 +58,18 @@ protected Custom createTestInstance() { for (int i = 0; i < numberOfRepositories; i++) { // divide by 2 to not overflow when adding to this number for the pending generation below final long generation = randomNonNegativeLong() / 2L; + CryptoMetadata cryptoMetadata = null; + if (randomBoolean()) { + cryptoMetadata = new CryptoMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings()); + } entries.add( new 
RepositoryMetadata( randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings(), generation, - generation + randomLongBetween(0, generation) + generation + randomLongBetween(0, generation), + cryptoMetadata ) ); } @@ -81,7 +87,11 @@ protected Custom mutateInstance(Custom instance) { List entries = new ArrayList<>(((RepositoriesMetadata) instance).repositories()); boolean addEntry = entries.isEmpty() ? true : randomBoolean(); if (addEntry) { - entries.add(new RepositoryMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings())); + CryptoMetadata cryptoMetadata = null; + if (randomBoolean()) { + cryptoMetadata = new CryptoMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings()); + } + entries.add(new RepositoryMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings(), cryptoMetadata)); } else { entries.remove(randomIntBetween(0, entries.size() - 1)); } @@ -114,7 +124,11 @@ protected Custom makeTestChanges(Custom testInstance) { // add some elements int addElements = randomInt(10); for (int i = 0; i < addElements; i++) { - repos.add(new RepositoryMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings())); + CryptoMetadata cryptoMetadata = null; + if (randomBoolean()) { + cryptoMetadata = new CryptoMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings()); + } + repos.add(new RepositoryMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings(), cryptoMetadata)); } } return new RepositoriesMetadata(repos); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 4f7697660096e..b7a2baacba611 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -130,6 +130,8 @@ import org.opensearch.cluster.coordination.ElectionStrategy; import org.opensearch.cluster.coordination.InMemoryPersistedState; import org.opensearch.cluster.coordination.MockSinglePrioritizingExecutor; +import org.opensearch.cluster.coordination.PersistedStateRegistry; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.metadata.AliasValidator; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; @@ -176,8 +178,10 @@ import org.opensearch.gateway.TransportNodesListGatewayStartedShards; import org.opensearch.index.IndexingPressureService; import org.opensearch.index.SegmentReplicationPressureService; +import org.opensearch.index.SegmentReplicationStatsTracker; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.remote.RemoteStorePressureService; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.seqno.GlobalCheckpointSyncAction; import org.opensearch.index.seqno.RetentionLeaseSyncer; import org.opensearch.index.shard.PrimaryReplicaSyncer; @@ -192,6 +196,7 @@ import org.opensearch.indices.analysis.AnalysisModule; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.indices.recovery.PeerRecoverySourceService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; @@ -202,6 
+207,7 @@ import org.opensearch.ingest.IngestService; import org.opensearch.monitor.StatusInfo; import org.opensearch.node.ResponseCollectorService; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.plugins.PluginsService; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -217,6 +223,8 @@ import org.opensearch.search.query.QueryPhase; import org.opensearch.snapshots.mockstore.MockEventuallyConsistentRepository; import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; import org.opensearch.threadpool.ThreadPool; @@ -1889,6 +1897,7 @@ private final class TestClusterNode { private final ClusterInfoService clusterInfoService; private Coordinator coordinator; + private RemoteStoreNodeService remoteStoreNodeService; private Map actions = new HashMap<>(); @@ -1980,7 +1989,7 @@ public void onFailure(final Exception e) { return actualHandler; } } - }, a -> node, null, emptySet()); + }, a -> node, null, emptySet(), NoopTracer.INSTANCE); final IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver( new ThreadContext(Settings.EMPTY) ); @@ -1994,6 +2003,7 @@ public void onFailure(final Exception e) { emptyMap(), threadPool ); + remoteStoreNodeService = new RemoteStoreNodeService(new SetOnce<>(repositoriesService)::get, threadPool); final ActionFilters actionFilters = new ActionFilters(emptySet()); snapshotsService = new SnapshotsService( settings, @@ -2061,7 +2071,10 @@ public void onFailure(final Exception e) { emptyMap(), new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), repositoriesServiceReference::get, - fileCacheCleaner + fileCacheCleaner, + null, + new RemoteStoreStatsTrackerFactory(clusterService, settings), + DefaultRecoverySettings.INSTANCE ); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); snapshotShardsService = new SnapshotShardsService( @@ -2112,7 +2125,8 @@ public void onFailure(final Exception e) { shardStateAction, actionFilters, new IndexingPressureService(settings, clusterService), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ) ), new GlobalCheckpointSyncAction( @@ -2126,7 +2140,7 @@ public void onFailure(final Exception e) { ), RetentionLeaseSyncer.EMPTY, SegmentReplicationCheckpointPublisher.EMPTY, - mock(RemoteStorePressureService.class) + mock(RemoteStoreStatsTrackerFactory.class) ); final SystemIndices systemIndices = new SystemIndices(emptyMap()); @@ -2175,10 +2189,12 @@ public void onFailure(final Exception e) { clusterService, mock(IndicesService.class), mock(ShardStateAction.class), + mock(SegmentReplicationStatsTracker.class), mock(ThreadPool.class) ), mock(RemoteStorePressureService.class), - new SystemIndices(emptyMap()) + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ); actions.put( BulkAction.INSTANCE, @@ -2201,7 +2217,9 @@ public void onFailure(final Exception e) { indexNameExpressionResolver, new AutoCreateIndex(settings, clusterSettings, indexNameExpressionResolver, new SystemIndices(emptyMap())), new IndexingPressureService(settings, clusterService), - new SystemIndices(emptyMap()) + mock(IndicesService.class), + new SystemIndices(emptyMap()), + NoopTracer.INSTANCE ) ); final RestoreService 
restoreService = new RestoreService( @@ -2289,7 +2307,9 @@ public void onFailure(final Exception e) { namedWriteableRegistry, List.of(), client - ) + ), + null, + NoopMetricsRegistry.INSTANCE ) ); actions.put( @@ -2487,6 +2507,8 @@ public void start(ClusterState initialState) { initialState.term(), stateForNode(initialState, node) ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, persistedState); coordinator = new Coordinator( node.getName(), clusterService.getSettings(), @@ -2506,7 +2528,9 @@ public void start(ClusterState initialState) { random(), rerouteService, ElectionStrategy.DEFAULT_INSTANCE, - () -> new StatusInfo(HEALTHY, "healthy-info") + () -> new StatusInfo(HEALTHY, "healthy-info"), + persistedStateRegistry, + remoteStoreNodeService ); clusterManagerService.setClusterStatePublisher(coordinator); coordinator.start(); diff --git a/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java b/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java index ca8bec469f3bc..f9388c9e4b86e 100644 --- a/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java +++ b/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java @@ -90,7 +90,7 @@ public MockEventuallyConsistentRepository( final Context context, final Random random ) { - super(metadata, false, namedXContentRegistry, clusterService, recoverySettings); + super(metadata, namedXContentRegistry, clusterService, recoverySettings); this.context = context; this.namedXContentRegistry = namedXContentRegistry; this.random = random; diff --git a/server/src/test/java/org/opensearch/tasks/TaskCancellationMonitoringServiceTests.java b/server/src/test/java/org/opensearch/tasks/TaskCancellationMonitoringServiceTests.java index aa626013240b8..bb154b95f9f01 100644 --- a/server/src/test/java/org/opensearch/tasks/TaskCancellationMonitoringServiceTests.java +++ b/server/src/test/java/org/opensearch/tasks/TaskCancellationMonitoringServiceTests.java @@ -14,6 +14,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.tasks.TaskId; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.Scheduler; @@ -47,7 +48,7 @@ public class TaskCancellationMonitoringServiceTests extends OpenSearchTestCase { @Before public void setup() { threadPool = new TestThreadPool(getClass().getName()); - transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool); + transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, NoopTracer.INSTANCE); transportService.start(); transportService.acceptIncomingRequests(); taskManager = transportService.getTaskManager(); diff --git a/server/src/test/java/org/opensearch/telemetry/metrics/MetricsRegistryFactoryTests.java b/server/src/test/java/org/opensearch/telemetry/metrics/MetricsRegistryFactoryTests.java new file mode 100644 index 0000000000000..80942123fd4fd --- /dev/null +++ b/server/src/test/java/org/opensearch/telemetry/metrics/MetricsRegistryFactoryTests.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions 
made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.metrics; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.telemetry.Telemetry; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.metrics.noop.NoopCounter; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; +import org.opensearch.telemetry.tracing.TracingTelemetry; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; + +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class MetricsRegistryFactoryTests extends OpenSearchTestCase { + + private MetricsRegistryFactory metricsRegistryFactory; + + @After + public void close() { + metricsRegistryFactory.close(); + } + + public void testGetMeterRegistryWithUnavailableMetricsTelemetry() { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), false).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getTracingTelemetry()).thenReturn(mock(TracingTelemetry.class)); + metricsRegistryFactory = new MetricsRegistryFactory(telemetrySettings, Optional.empty()); + + MetricsRegistry metricsRegistry = metricsRegistryFactory.getMetricsRegistry(); + + assertTrue(metricsRegistry instanceof NoopMetricsRegistry); + assertTrue(metricsRegistry.createCounter("test", "test", "test") == NoopCounter.INSTANCE); + assertTrue(metricsRegistry.createUpDownCounter("test", "test", "test") == NoopCounter.INSTANCE); + } + + public void testGetMetricsWithAvailableMetricsTelemetry() { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getMetricsTelemetry()).thenReturn(mock(MetricsTelemetry.class)); + metricsRegistryFactory = new MetricsRegistryFactory(telemetrySettings, Optional.of(mockTelemetry)); + + MetricsRegistry metricsRegistry = metricsRegistryFactory.getMetricsRegistry(); + assertTrue(metricsRegistry instanceof DefaultMetricsRegistry); + + } + + public void testNullMetricsTelemetry() { + Settings settings = Settings.builder().put(TelemetrySettings.METRICS_FEATURE_ENABLED_SETTING.getKey(), false).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getMetricsTelemetry()).thenReturn(null); + metricsRegistryFactory = new MetricsRegistryFactory(telemetrySettings, Optional.of(mockTelemetry)); + + MetricsRegistry metricsRegistry = metricsRegistryFactory.getMetricsRegistry(); + assertTrue(metricsRegistry instanceof NoopMetricsRegistry); + + } + + private Set> getClusterSettings() { + Set> allTracerSettings = new HashSet<>(); + ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS.get(List.of(FeatureFlags.TELEMETRY)).stream().forEach((allTracerSettings::add)); 
+ return allTracerSettings; + } +} diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java new file mode 100644 index 0000000000000..b4183412cdf02 --- /dev/null +++ b/server/src/test/java/org/opensearch/telemetry/tracing/SpanBuilderTests.java @@ -0,0 +1,158 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.network.NetworkAddress; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.http.HttpRequest; +import org.opensearch.http.HttpResponse; +import org.opensearch.rest.RestRequest; +import org.opensearch.telemetry.tracing.attributes.Attributes; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.Transport; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestOptions; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +public class SpanBuilderTests extends OpenSearchTestCase { + + public void testHttpRequestContext() { + HttpRequest httpRequest = createHttpRequest(); + SpanCreationContext context = SpanBuilder.from(httpRequest); + Attributes attributes = context.getAttributes(); + assertEquals("GET /_test", context.getSpanName()); + assertEquals("true", attributes.getAttributesMap().get(AttributeNames.TRACE)); + assertEquals("GET", attributes.getAttributesMap().get(AttributeNames.HTTP_METHOD)); + assertEquals("HTTP_1_0", attributes.getAttributesMap().get(AttributeNames.HTTP_PROTOCOL_VERSION)); + assertEquals("/_test", attributes.getAttributesMap().get(AttributeNames.HTTP_URI)); + } + + public void testRestRequestContext() { + RestRequest restRequest = RestRequest.request(null, createHttpRequest(), null); + SpanCreationContext context = SpanBuilder.from(restRequest); + Attributes attributes = context.getAttributes(); + assertEquals("GET /_test", context.getSpanName()); + assertEquals("/_test", attributes.getAttributesMap().get(AttributeNames.REST_REQ_RAW_PATH)); + assertNotNull(attributes.getAttributesMap().get(AttributeNames.REST_REQ_ID)); + } + + public void testRestRequestContextForNull() { + SpanCreationContext context = SpanBuilder.from((RestRequest) null); + assertEquals("rest_request", context.getSpanName()); + assertEquals(Attributes.EMPTY, context.getAttributes()); + } + + public void testTransportContext() { + String action = "test-action"; + Transport.Connection connection = createTransportConnection(); + SpanCreationContext context = SpanBuilder.from(action, connection); + Attributes attributes = context.getAttributes(); + assertEquals(action + " " + NetworkAddress.format(TransportAddress.META_ADDRESS), context.getSpanName()); + assertEquals(connection.getNode().getHostAddress(), attributes.getAttributesMap().get(AttributeNames.TRANSPORT_TARGET_HOST)); + } + + private static Transport.Connection createTransportConnection() { + return new Transport.Connection() { + @Override + public 
DiscoveryNode getNode() { + return new DiscoveryNode("local", new TransportAddress(TransportAddress.META_ADDRESS, 9200), Version.V_2_0_0); + } + + @Override + public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) + throws IOException, TransportException { + + } + + @Override + public void addCloseListener(ActionListener listener) { + + } + + @Override + public boolean isClosed() { + return false; + } + + @Override + public void close() { + + } + }; + } + + private static HttpRequest createHttpRequest() { + return new HttpRequest() { + @Override + public RestRequest.Method method() { + return RestRequest.Method.GET; + } + + @Override + public String uri() { + return "/_test"; + } + + @Override + public BytesReference content() { + return null; + } + + @Override + public Map> getHeaders() { + return Map.of("trace", Arrays.asList("true")); + } + + @Override + public List strictCookies() { + return null; + } + + @Override + public HttpVersion protocolVersion() { + return HttpVersion.HTTP_1_0; + } + + @Override + public HttpRequest removeHeader(String header) { + return null; + } + + @Override + public HttpResponse createResponse(RestStatus status, BytesReference content) { + return null; + } + + @Override + public Exception getInboundException() { + return null; + } + + @Override + public void release() { + + } + + @Override + public HttpRequest releaseAndCopy() { + return null; + } + }; + } +} diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java index 451a1b9e3eb9c..3a388be22445e 100644 --- a/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java +++ b/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java @@ -15,6 +15,8 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.telemetry.Telemetry; import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.tracing.attributes.Attributes; +import org.opensearch.telemetry.tracing.noop.NoopSpan; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.junit.After; @@ -46,7 +48,27 @@ public void testGetTracerWithUnavailableTracingTelemetryReturnsNoopTracer() { Tracer tracer = tracerFactory.getTracer(); assertTrue(tracer instanceof NoopTracer); - assertTrue(tracer.startSpan("foo") == SpanScope.NO_OP); + assertTrue(tracer.startSpan(SpanCreationContext.internal().name("foo").attributes(Attributes.EMPTY)) == NoopSpan.INSTANCE); + assertTrue(tracer.startScopedSpan(SpanCreationContext.internal().name("foo").attributes(Attributes.EMPTY)) == ScopedSpan.NO_OP); + assertTrue(tracer.startScopedSpan(SpanCreationContext.internal().name("foo").attributes(Attributes.EMPTY)) == ScopedSpan.NO_OP); + assertTrue( + tracer.withSpanInScope( + tracer.startSpan(SpanCreationContext.internal().name("foo").attributes(Attributes.EMPTY)) + ) == SpanScope.NO_OP + ); + } + + public void testGetTracerWithUnavailableTracingTelemetry() { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), false).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getTracingTelemetry()).thenReturn(mock(TracingTelemetry.class)); + tracerFactory = new 
TracerFactory(telemetrySettings, Optional.empty(), new ThreadContext(Settings.EMPTY)); + + Tracer tracer = tracerFactory.getTracer(); + + assertTrue(tracer instanceof NoopTracer); + assertTrue(tracer.startScopedSpan(SpanCreationContext.internal().name("foo").attributes(Attributes.EMPTY)) == ScopedSpan.NO_OP); } public void testGetTracerWithAvailableTracingTelemetryReturnsWrappedTracer() { @@ -61,6 +83,18 @@ public void testGetTracerWithAvailableTracingTelemetryReturnsWrappedTracer() { } + public void testNullTracer() { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), false).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getTracingTelemetry()).thenReturn(null); + tracerFactory = new TracerFactory(telemetrySettings, Optional.of(mockTelemetry), new ThreadContext(Settings.EMPTY)); + + Tracer tracer = tracerFactory.getTracer(); + assertTrue(tracer instanceof NoopTracer); + + } + private Set> getClusterSettings() { Set> allTracerSettings = new HashSet<>(); ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS.get(List.of(FeatureFlags.TELEMETRY)).stream().forEach((allTracerSettings::add)); diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java index f45381e3b4cc4..8606104d26103 100644 --- a/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java +++ b/server/src/test/java/org/opensearch/telemetry/tracing/WrappedTracerTests.java @@ -22,11 +22,11 @@ import java.util.List; import java.util.Set; -import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class WrappedTracerTests extends OpenSearchTestCase { @@ -36,9 +36,11 @@ public void testStartSpanWithTracingDisabledInvokesNoopTracer() throws Exception DefaultTracer mockDefaultTracer = mock(DefaultTracer.class); try (WrappedTracer wrappedTracer = new WrappedTracer(telemetrySettings, mockDefaultTracer)) { - wrappedTracer.startSpan("foo"); + SpanCreationContext spanCreationContext = SpanCreationContext.internal().name("foo"); + wrappedTracer.startSpan(spanCreationContext); assertTrue(wrappedTracer.getDelegateTracer() instanceof NoopTracer); - verify(mockDefaultTracer, never()).startSpan("foo"); + assertFalse(wrappedTracer.isRecording()); + verify(mockDefaultTracer, never()).startSpan(SpanCreationContext.internal().name("foo")); } } @@ -46,12 +48,14 @@ public void testStartSpanWithTracingEnabledInvokesDefaultTracer() throws Excepti Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true).build(); TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); DefaultTracer mockDefaultTracer = mock(DefaultTracer.class); - + when(mockDefaultTracer.isRecording()).thenReturn(true); try (WrappedTracer wrappedTracer = new WrappedTracer(telemetrySettings, mockDefaultTracer)) { - wrappedTracer.startSpan("foo"); + SpanCreationContext spanCreationContext = SpanCreationContext.internal().name("foo"); + wrappedTracer.startSpan(spanCreationContext); assertTrue(wrappedTracer.getDelegateTracer() instanceof 
DefaultTracer); - verify(mockDefaultTracer).startSpan(eq("foo"), eq(null), any(Attributes.class)); + assertTrue(wrappedTracer.isRecording()); + verify(mockDefaultTracer).startSpan(eq(spanCreationContext)); } } @@ -61,10 +65,11 @@ public void testStartSpanWithTracingEnabledInvokesDefaultTracerWithAttr() throws DefaultTracer mockDefaultTracer = mock(DefaultTracer.class); Attributes attributes = Attributes.create().addAttribute("key", "value"); try (WrappedTracer wrappedTracer = new WrappedTracer(telemetrySettings, mockDefaultTracer)) { - wrappedTracer.startSpan("foo", attributes); + SpanCreationContext spanCreationContext = SpanCreationContext.internal().name("foo"); + wrappedTracer.startSpan(spanCreationContext); assertTrue(wrappedTracer.getDelegateTracer() instanceof DefaultTracer); - verify(mockDefaultTracer).startSpan("foo", null, attributes); + verify(mockDefaultTracer).startSpan(spanCreationContext); } } diff --git a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java index 92bd15d818bca..19271bbf30e80 100644 --- a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java @@ -32,10 +32,13 @@ package org.opensearch.threadpool; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor; +import java.util.Collection; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -43,14 +46,29 @@ import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Function; +import java.util.stream.Collectors; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.equalTo; public class ScalingThreadPoolTests extends OpenSearchThreadPoolTestCase { + @ParametersFactory + public static Collection scalingThreadPools() { + return ThreadPool.THREAD_POOL_TYPES.entrySet() + .stream() + .filter(t -> t.getValue().equals(ThreadPool.ThreadPoolType.SCALING)) + .map(e -> new String[] { e.getKey() }) + .collect(Collectors.toList()); + } + + private final String threadPoolName; + + public ScalingThreadPoolTests(String threadPoolName) { + this.threadPoolName = threadPoolName; + } + public void testScalingThreadPoolConfiguration() throws InterruptedException { - final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); final Settings.Builder builder = Settings.builder(); final int core; @@ -136,11 +154,11 @@ private int expectedSize(final String threadPoolName, final int numberOfProcesso sizes.put(ThreadPool.Names.TRANSLOG_SYNC, n -> 4 * n); sizes.put(ThreadPool.Names.REMOTE_PURGE, ThreadPool::halfAllocatedProcessorsMaxFive); sizes.put(ThreadPool.Names.REMOTE_REFRESH_RETRY, ThreadPool::halfAllocatedProcessorsMaxTen); + sizes.put(ThreadPool.Names.REMOTE_RECOVERY, ThreadPool::twiceAllocatedProcessors); return sizes.get(threadPoolName).apply(numberOfProcessors); } public void testScalingThreadPoolIsBounded() throws InterruptedException { - final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); final int size = randomIntBetween(32, 512); final Settings settings = Settings.builder().put("thread_pool." 
+ threadPoolName + ".max", size).build(); runScalingThreadPoolTest(settings, (clusterSettings, threadPool) -> { @@ -170,7 +188,6 @@ public void testScalingThreadPoolIsBounded() throws InterruptedException { } public void testScalingThreadPoolThreadsAreTerminatedAfterKeepAlive() throws InterruptedException { - final String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); final int min = "generic".equals(threadPoolName) ? 4 : 1; final Settings settings = Settings.builder() .put("thread_pool." + threadPoolName + ".max", 128) diff --git a/server/src/test/java/org/opensearch/threadpool/ThreadPoolStatsTests.java b/server/src/test/java/org/opensearch/threadpool/ThreadPoolStatsTests.java index 0965f17ba5c70..869d7ec59b081 100644 --- a/server/src/test/java/org/opensearch/threadpool/ThreadPoolStatsTests.java +++ b/server/src/test/java/org/opensearch/threadpool/ThreadPoolStatsTests.java @@ -51,13 +51,13 @@ public class ThreadPoolStatsTests extends OpenSearchTestCase { public void testThreadPoolStatsSort() throws IOException { List stats = new ArrayList<>(); - stats.add(new ThreadPoolStats.Stats("z", -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("m", 3, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("m", 1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("d", -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("m", 2, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("t", -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats("a", -1, 0, 0, 0, 0, 0L)); + stats.add(new ThreadPoolStats.Stats("z", -1, 0, 0, 0, 0, 0L, 0L)); + stats.add(new ThreadPoolStats.Stats("m", 3, 0, 0, 0, 0, 0L, 0L)); + stats.add(new ThreadPoolStats.Stats("m", 1, 0, 0, 0, 0, 0L, 0L)); + stats.add(new ThreadPoolStats.Stats("d", -1, 0, 0, 0, 0, 0L, 0L)); + stats.add(new ThreadPoolStats.Stats("m", 2, 0, 0, 0, 0, 0L, 0L)); + stats.add(new ThreadPoolStats.Stats("t", -1, 0, 0, 0, 0, 0L, 0L)); + stats.add(new ThreadPoolStats.Stats("a", -1, 0, 0, 0, 0, 0L, 0L)); List copy = new ArrayList<>(stats); Collections.sort(copy); @@ -79,11 +79,11 @@ public void testThreadPoolStatsToXContent() throws IOException { try (BytesStreamOutput os = new BytesStreamOutput()) { List stats = new ArrayList<>(); - stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SEARCH, -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.WARMER, -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.GENERIC, -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.FORCE_MERGE, -1, 0, 0, 0, 0, 0L)); - stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SAME, -1, 0, 0, 0, 0, 0L)); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SEARCH, -1, 0, 0, 0, 0, 0L, 0L)); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.WARMER, -1, 0, 0, 0, 0, 0L, -1L)); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.GENERIC, -1, 0, 0, 0, 0, 0L, -1L)); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.FORCE_MERGE, -1, 0, 0, 0, 0, 0L, -1L)); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SAME, -1, 0, 0, 0, 0, 0L, -1L)); ThreadPoolStats threadPoolStats = new ThreadPoolStats(stats); try (XContentBuilder builder = new XContentBuilder(MediaTypeRegistry.JSON.xContent(), os)) { diff --git a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java index 9a261c5745bc2..e002297911788 100644 --- 
a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java @@ -50,6 +50,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.tasks.TaskManager; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -117,7 +118,8 @@ public void sendMessage(BytesReference reference, ActionListener listener) handshaker, keepAlive, requestHandlers, - responseHandlers + responseHandlers, + NoopTracer.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/transport/ProxyConnectionStrategyTests.java b/server/src/test/java/org/opensearch/transport/ProxyConnectionStrategyTests.java index 510a2b3abd943..1c9880ed14714 100644 --- a/server/src/test/java/org/opensearch/transport/ProxyConnectionStrategyTests.java +++ b/server/src/test/java/org/opensearch/transport/ProxyConnectionStrategyTests.java @@ -42,6 +42,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -82,7 +83,7 @@ public MockTransportService startTransport(final String id, final Version versio .put("node.name", id) .put(settings) .build(); - MockTransportService newService = MockTransportService.createNewService(s, version, threadPool); + MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, NoopTracer.INSTANCE); try { newService.start(); newService.acceptIncomingRequests(); @@ -99,7 +100,14 @@ public void testProxyStrategyWillOpenExpectedNumberOfConnectionsToAddress() { try (MockTransportService transport1 = startTransport("node1", Version.CURRENT)) { TransportAddress address1 = transport1.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -138,7 +146,14 @@ public void testProxyStrategyWillOpenNewConnectionsOnDisconnect() throws Excepti TransportAddress address1 = transport1.boundAddress().publishAddress(); TransportAddress address2 = transport2.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -200,7 +215,14 @@ public void testConnectFailsWithIncompatibleNodes() { try (MockTransportService transport1 = startTransport("incompatible-node", incompatibleVersion)) { TransportAddress address1 = transport1.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService 
localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -240,7 +262,14 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro TransportAddress address1 = transport1.boundAddress().publishAddress(); TransportAddress address2 = transport2.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -303,7 +332,14 @@ public void testProxyStrategyWillResolveAddressesEachConnect() throws Exception return address; }; - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -338,7 +374,14 @@ public void testProxyStrategyWillNeedToBeRebuiltIfNumOfSocketsOrAddressesOrServe try (MockTransportService remoteTransport = startTransport("node1", Version.CURRENT)) { TransportAddress remoteAddress = remoteTransport.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -441,7 +484,14 @@ public void testServerNameAttributes() { try (MockTransportService transport1 = startTransport("node1", Version.CURRENT, bindSettings)) { TransportAddress address1 = transport1.boundAddress().publishAddress(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterAwareClientTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterAwareClientTests.java index a2a77168c8991..7595982837365 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterAwareClientTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteClusterAwareClientTests.java @@ -41,6 +41,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.action.ActionListener; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -81,7 +82,14 @@ public void testSearchShards() throws Exception { Collections.shuffle(knownNodes, random()); Settings.Builder builder = Settings.builder(); builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); - try 
(MockTransportService service = MockTransportService.createNewService(builder.build(), Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + builder.build(), + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); @@ -121,7 +129,14 @@ public void testSearchShardsThreadContextHeader() { Collections.shuffle(knownNodes, random()); Settings.Builder builder = Settings.builder(); builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); - try (MockTransportService service = MockTransportService.createNewService(builder.build(), Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + builder.build(), + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterClientTests.java index b89d652510850..f3b7f9916d460 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterClientTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteClusterClientTests.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.settings.Settings; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.MockTransportService; @@ -79,7 +80,14 @@ public void testConnectAndExecuteRequest() throws Exception { .put(onlyRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)) .put("cluster.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()) .build(); - try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + localSettings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); // following two log lines added to investigate #41745, can be removed once issue is closed logger.info("Start accepting incoming requests on local transport service"); @@ -118,7 +126,14 @@ public void testEnsureWeReconnect() throws Exception { .put(onlyRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)) .put("cluster.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()) .build(); - try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + localSettings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); // this test is not perfect since we might reconnect concurrently but it will fail most of the time if we don't have // the right calls in place in the RemoteAwareClient @@ -147,7 +162,9 @@ public void testEnsureWeReconnect() throws Exception { public void testRemoteClusterServiceNotEnabled() { final Settings settings = removeRoles(Collections.singleton(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)); - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, 
null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterConnectionTests.java index d481f361f2e54..bb653439ec21e 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteClusterConnectionTests.java @@ -66,6 +66,7 @@ import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -126,7 +127,7 @@ public static MockTransportService startTransport( boolean success = false; final Settings s = Settings.builder().put(settings).put("node.name", id).build(); ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(s); - MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, null); + MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, NoopTracer.INSTANCE); try { newService.registerRequestHandler( ClusterSearchShardsAction.NAME, @@ -231,7 +232,14 @@ public void run() { }; t.start(); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); CountDownLatch listenerCalled = new CountDownLatch(1); @@ -280,7 +288,14 @@ public void testCloseWhileConcurrentlyConnecting() throws IOException, Interrupt List seedNodes = addresses(seedNode1, seedNode); Collections.shuffle(seedNodes, random()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; @@ -367,7 +382,14 @@ public void testGetConnectionInfo() throws Exception { List seedNodes = addresses(node3, node1, node2); Collections.shuffle(seedNodes, random()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); int maxNumConnections = randomIntBetween(1, 5); @@ -480,7 +502,14 @@ public void testCollectNodes() throws Exception { try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT)) { DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); knownNodes.add(seedTransport.getLocalDiscoNode()); - try (MockTransportService service = 
MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; @@ -515,7 +544,14 @@ public void testNoChannelsExceptREG() throws Exception { try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT)) { DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); knownNodes.add(seedTransport.getLocalDiscoNode()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; @@ -568,7 +604,14 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted ); Collections.shuffle(seedNodes, random()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); @@ -645,7 +688,14 @@ public void testGetConnection() throws Exception { DiscoveryNode disconnectedNode = disconnectedTransport.getLocalNode(); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java index f6f3e8fa60863..449715189c881 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java @@ -44,6 +44,7 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; @@ -162,7 +163,7 @@ public void testGroupClusterIndices() throws IOException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -233,7 +234,7 @@ public void testGroupIndices() throws IOException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -326,7 +327,7 @@ public void testIncrementallyAddClusters() throws IOException { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -393,7 +394,12 @@ public void testDefaultPingSchedule() throws IOException { } Settings settings = settingsBuilder.build(); try ( - MockTransportService transportService = 
MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null) + MockTransportService transportService = MockTransportService.createNewService( + settings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) ) { transportService.start(); transportService.acceptIncomingRequests(); @@ -436,7 +442,7 @@ public void testCustomPingSchedule() throws IOException { transportSettings, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -474,7 +480,7 @@ public void testChangeSettings() throws Exception { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -523,7 +529,12 @@ public void testRemoteNodeAttribute() throws IOException, InterruptedException { Collections.shuffle(knownNodes, random()); try ( - MockTransportService transportService = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null) + MockTransportService transportService = MockTransportService.createNewService( + settings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) ) { transportService.start(); transportService.acceptIncomingRequests(); @@ -586,7 +597,12 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { Collections.shuffle(knownNodes, random()); try ( - MockTransportService transportService = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null) + MockTransportService transportService = MockTransportService.createNewService( + settings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) ) { transportService.start(); transportService.acceptIncomingRequests(); @@ -654,7 +670,12 @@ public void testCollectNodes() throws InterruptedException, IOException { Collections.shuffle(knownNodes_c2, random()); try ( - MockTransportService transportService = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null) + MockTransportService transportService = MockTransportService.createNewService( + settings, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) ) { transportService.start(); transportService.acceptIncomingRequests(); @@ -901,7 +922,7 @@ public void testReconnectWhenStrategySettingsUpdated() throws Exception { Settings.EMPTY, Version.CURRENT, threadPool, - null + NoopTracer.INSTANCE ) ) { transportService.start(); @@ -983,7 +1004,14 @@ public void testSkipUnavailable() { knownNodes.add(seedNode); Settings.Builder builder = Settings.builder(); builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); - try (MockTransportService service = MockTransportService.createNewService(builder.build(), Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService( + builder.build(), + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { service.start(); service.acceptIncomingRequests(); @@ -1002,7 +1030,9 @@ public void testSkipUnavailable() { public void testRemoteClusterServiceNotEnabledGetRemoteClusterConnection() { final Settings settings = removeRoles(Collections.singleton(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)); - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); final IllegalArgumentException e = 
expectThrows( @@ -1015,7 +1045,9 @@ public void testRemoteClusterServiceNotEnabledGetRemoteClusterConnection() { public void testRemoteClusterServiceNotEnabledGetCollectNodes() { final Settings settings = removeRoles(Collections.singleton(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)); - try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + try ( + MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, NoopTracer.INSTANCE) + ) { service.start(); service.acceptIncomingRequests(); final IllegalArgumentException e = expectThrows( diff --git a/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java b/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java index ca85ec2270caf..c89a9d328b419 100644 --- a/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java +++ b/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java @@ -49,6 +49,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.test.transport.MockTransportService; @@ -105,7 +106,7 @@ public MockTransportService startTransport( .put(settings) .build(); ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(s); - MockTransportService newService = MockTransportService.createNewService(s, version, threadPool); + MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, NoopTracer.INSTANCE); try { newService.registerRequestHandler( ClusterStateAction.NAME, @@ -143,7 +144,14 @@ public void testSniffStrategyWillConnectToAndDiscoverNodes() { knownNodes.add(discoverableNode); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -192,7 +200,14 @@ public void testSniffStrategyWillResolveDiscoveryNodesEachConnect() throws Excep return seedNode; }; - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -240,7 +255,14 @@ public void testSniffStrategyWillConnectToMaxAllowedNodesAndOpenNewConnectionsOn knownNodes.add(discoverableNode2); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -297,7 +319,14 @@ public void testDiscoverWithSingleIncompatibleSeedNode() { knownNodes.add(discoverableNode); Collections.shuffle(knownNodes, 
random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -336,7 +365,14 @@ public void testConnectFailsWithIncompatibleNodes() { DiscoveryNode incompatibleSeedNode = incompatibleSeedTransport.getLocalNode(); knownNodes.add(incompatibleSeedNode); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -378,7 +414,14 @@ public void testFilterNodesWithNodePredicate() { DiscoveryNode rejectedNode = randomBoolean() ? seedNode : discoverableNode; Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -424,7 +467,14 @@ public void testConnectFailsIfNoConnectionsOpened() { knownNodes.add(discoverableNode); closedTransport.close(); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -474,7 +524,14 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro Collections.shuffle(knownNodes, random()); Collections.shuffle(otherKnownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -542,7 +599,14 @@ public void testMultipleCallsToConnectEnsuresConnection() { knownNodes.add(discoverableNode); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -589,8 +653,18 @@ public void testConfiguredProxyAddressModeWillReplaceNodeAddress() { List knownNodes = new CopyOnWriteArrayList<>(); try ( MockTransportService accessible = startTransport("seed_node", knownNodes, Version.CURRENT); - MockTransportService unresponsive1 = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool); - MockTransportService unresponsive2 = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool) + MockTransportService unresponsive1 = 
MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ); + MockTransportService unresponsive2 = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) ) { // We start in order to get a valid address + port, but do not start accepting connections as we // will not actually connect to these transports @@ -616,7 +690,14 @@ public void testConfiguredProxyAddressModeWillReplaceNodeAddress() { knownNodes.add(discoverableNode); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); @@ -679,7 +760,14 @@ public void testSniffStrategyWillNeedToBeRebuiltIfNumOfConnectionsOrSeedsOrProxy knownNodes.add(discoverableNode); Collections.shuffle(knownNodes, random()); - try (MockTransportService localService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool)) { + try ( + MockTransportService localService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + NoopTracer.INSTANCE + ) + ) { localService.start(); localService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/transport/TcpTransportTests.java b/server/src/test/java/org/opensearch/transport/TcpTransportTests.java index 06545b77c6d76..7ab78cca7d615 100644 --- a/server/src/test/java/org/opensearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/opensearch/transport/TcpTransportTests.java @@ -47,6 +47,7 @@ import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.junit.annotations.TestLogging; @@ -255,7 +256,8 @@ private void testDefaultSeedAddresses(final Settings settings, Matcher localNode, null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.registerRequestHandler( diff --git a/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java b/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java index 5abb032120dcf..9a884fd29d109 100644 --- a/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java @@ -43,6 +43,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -84,7 +85,8 @@ private NetworkHandle startServices(String nodeNameAndId, Settings settings, Ver new NetworkService(Collections.emptyList()), PageCacheRecycler.NON_RECYCLING_INSTANCE, new NamedWriteableRegistry(Collections.emptyList()), - new NoneCircuitBreakerService() + new 
NoneCircuitBreakerService(), + NoopTracer.INSTANCE ); TransportService transportService = new MockTransportService( settings, @@ -100,7 +102,8 @@ private NetworkHandle startServices(String nodeNameAndId, Settings settings, Ver version ), null, - Collections.emptySet() + Collections.emptySet(), + NoopTracer.INSTANCE ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/settings.gradle b/settings.gradle index c04b5997d49b1..13cc6669e3d33 100644 --- a/settings.gradle +++ b/settings.gradle @@ -13,9 +13,11 @@ plugins { id "com.gradle.enterprise" version "3.14.1" } +ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') + buildCache { local { - enabled = true + enabled = !disableBuildCache removeUnusedEntriesAfterDays = 14 } } diff --git a/test/external-modules/delayed-aggs/src/internalClusterTest/java/org/opensearch/search/aggregations/DelayedShardAggregationIT.java b/test/external-modules/delayed-aggs/src/internalClusterTest/java/org/opensearch/search/aggregations/DelayedShardAggregationIT.java index e850ae9dcc859..90f4f1ba2ceb2 100644 --- a/test/external-modules/delayed-aggs/src/internalClusterTest/java/org/opensearch/search/aggregations/DelayedShardAggregationIT.java +++ b/test/external-modules/delayed-aggs/src/internalClusterTest/java/org/opensearch/search/aggregations/DelayedShardAggregationIT.java @@ -31,25 +31,43 @@ package org.opensearch.search.aggregations; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.plugins.Plugin; import org.opensearch.search.aggregations.bucket.filter.InternalFilter; import org.opensearch.search.aggregations.metrics.InternalMax; import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class DelayedShardAggregationIT extends OpenSearchIntegTestCase { +public class DelayedShardAggregationIT extends ParameterizedOpenSearchIntegTestCase { + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } @Override protected Collection> nodePlugins() { diff --git a/test/external-modules/delayed-aggs/src/main/java/org/opensearch/search/aggregations/DelayedShardAggregationBuilder.java b/test/external-modules/delayed-aggs/src/main/java/org/opensearch/search/aggregations/DelayedShardAggregationBuilder.java index 02952eb7390dc..006632ca93925 100644 --- 
a/test/external-modules/delayed-aggs/src/main/java/org/opensearch/search/aggregations/DelayedShardAggregationBuilder.java +++ b/test/external-modules/delayed-aggs/src/main/java/org/opensearch/search/aggregations/DelayedShardAggregationBuilder.java @@ -137,6 +137,11 @@ protected Aggregator createInternal( } while (searchContext.getRelativeTimeInMillis() - start < delay.getMillis()); return factory.create(searchContext, parent, cardinality); } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } }; } diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index e30ff57abd3a3..b09c044e62a7a 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'opensearch.java' group = 'hdfs' versions << [ - 'jetty': '9.4.51.v20230217' + 'jetty': '9.4.52.v20230823' ] dependencies { @@ -49,7 +49,7 @@ dependencies { exclude module: "json-io" } api "org.codehaus.jettison:jettison:${versions.jettison}" - api "org.apache.commons:commons-compress:1.23.0" + api "org.apache.commons:commons-compress:${versions.commonscompress}" api "commons-codec:commons-codec:${versions.commonscodec}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api "io.netty:netty-all:${versions.netty}" @@ -71,5 +71,5 @@ dependencies { exclude group: "com.squareup.okio" } runtimeOnly "com.squareup.okio:okio:3.5.0" - runtimeOnly "org.xerial.snappy:snappy-java:1.1.10.3" + runtimeOnly "org.xerial.snappy:snappy-java:1.1.10.5" } diff --git a/test/framework/src/main/java/org/opensearch/action/support/ActionTestUtils.java b/test/framework/src/main/java/org/opensearch/action/support/ActionTestUtils.java index 00fd5a5bcf815..52496c331c25e 100644 --- a/test/framework/src/main/java/org/opensearch/action/support/ActionTestUtils.java +++ b/test/framework/src/main/java/org/opensearch/action/support/ActionTestUtils.java @@ -55,7 +55,7 @@ public static R /** * Executes the given action. - * + *
<p>
            * This is a shim method to make execution publicly available in tests. */ public static void execute( diff --git a/test/framework/src/main/java/org/opensearch/cli/CommandTestCase.java b/test/framework/src/main/java/org/opensearch/cli/CommandTestCase.java index 4ee1314d27fe1..5566b493adc7d 100644 --- a/test/framework/src/main/java/org/opensearch/cli/CommandTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cli/CommandTestCase.java @@ -54,7 +54,7 @@ public void resetTerminal() { /** * Runs a command with the given args. - * + *
<p>
            * Output can be found in {@link #terminal}. */ public String execute(String... args) throws Exception { diff --git a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java index 6354cf18e8b62..2ba4de5e54a67 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java @@ -112,6 +112,7 @@ List adjustNodesStats(List nodesStats) { nodeStats.getDiscoveryStats(), nodeStats.getIngestStats(), nodeStats.getAdaptiveSelectionStats(), + nodeStats.getResourceUsageStats(), nodeStats.getScriptCacheStats(), nodeStats.getIndexingPressureStats(), nodeStats.getShardIndexingPressureStats(), @@ -120,7 +121,9 @@ List adjustNodesStats(List nodesStats) { nodeStats.getWeightedRoutingStats(), nodeStats.getFileCacheStats(), nodeStats.getTaskCancellationStats(), - nodeStats.getSearchPipelineStats() + nodeStats.getSearchPipelineStats(), + nodeStats.getSegmentReplicationRejectionStats(), + nodeStats.getRepositoriesStats() ); }).collect(Collectors.toList()); } diff --git a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationWithConstraintsTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationWithConstraintsTestCase.java index 7f1d1d3381751..0c08de252e4cd 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationWithConstraintsTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/OpenSearchAllocationWithConstraintsTestCase.java @@ -209,28 +209,28 @@ public int allocateAndCheckIndexShardHotSpots(boolean expected, int nodes, Strin continue; } - /** - * Hot spots can occur due to the order in which shards get allocated to nodes. - * A node with fewer shards may not be able to accept current shard due to - * SameShardAllocationDecider, causing it to breach allocation constraint on - * another node. We need to differentiate between such hot spots v/s actual hot - * spots. - * - * A simple check could be to ensure there is no node with shards less than - * allocation limit, that can accept current shard. However, in current - * allocation algorithm, when nodes get throttled, shards are added to - * ModelNodes without adding them to actual cluster (RoutingNodes). As a result, - * the shards per node we see here, are different from the ones observed by - * weight function in balancer. RoutingNodes with {@link count} < {@link limit} - * may not have had the same count in the corresponding ModelNode seen by weight - * function. We hence use the following alternate check -- - * - * Given the way {@link limit} is defined, we should not have hot spots if *all* - * nodes are eligible to accept the shard. A hot spot is acceptable, if either - * all peer nodes have {@link count} > {@link limit}, or if even one node is - * ineligible to accept the shard due to SameShardAllocationDecider, as this - * leads to a chain of events that breach IndexShardsPerNode constraint on all - * other nodes. + /* + Hot spots can occur due to the order in which shards get allocated to nodes. + A node with fewer shards may not be able to accept current shard due to + SameShardAllocationDecider, causing it to breach allocation constraint on + another node. We need to differentiate between such hot spots v/s actual hot + spots. 
+ + A simple check could be to ensure there is no node with shards less than + allocation limit, that can accept current shard. However, in current + allocation algorithm, when nodes get throttled, shards are added to + ModelNodes without adding them to actual cluster (RoutingNodes). As a result, + the shards per node we see here, are different from the ones observed by + weight function in balancer. RoutingNodes with {@link count} < {@link limit} + may not have had the same count in the corresponding ModelNode seen by weight + function. We hence use the following alternate check -- + + Given the way {@link limit} is defined, we should not have hot spots if *all* + nodes are eligible to accept the shard. A hot spot is acceptable, if either + all peer nodes have {@link count} > {@link limit}, or if even one node is + ineligible to accept the shard due to SameShardAllocationDecider, as this + leads to a chain of events that breach IndexShardsPerNode constraint on all + other nodes. */ // If all peer nodes have count >= limit, hotspot is acceptable diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index 8fac407547a9d..28d7706fb1493 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -49,6 +49,7 @@ import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; import org.opensearch.cluster.coordination.LinearizabilityChecker.History; import org.opensearch.cluster.coordination.LinearizabilityChecker.SequentialSpec; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; @@ -58,6 +59,7 @@ import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; import org.opensearch.common.Nullable; import org.opensearch.common.Randomness; +import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -84,6 +86,9 @@ import org.opensearch.gateway.PersistedClusterStateService; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; import org.opensearch.test.disruption.DisruptableMockTransport.ConnectionStatus; @@ -840,14 +845,16 @@ class MockPersistedState implements CoordinationState.PersistedState { private final CoordinationState.PersistedState delegate; private final NodeEnvironment nodeEnvironment; + private MockGatewayMetaState mockGatewayMetaState; + MockPersistedState(DiscoveryNode localNode) { try { if (rarely()) { nodeEnvironment = newNodeEnvironment(); nodeEnvironments.add(nodeEnvironment); - final MockGatewayMetaState gatewayMetaState = new MockGatewayMetaState(localNode, bigArrays); - gatewayMetaState.start(Settings.EMPTY, nodeEnvironment, xContentRegistry()); - delegate = gatewayMetaState.getPersistedState(); + 
mockGatewayMetaState = new MockGatewayMetaState(localNode, bigArrays); + mockGatewayMetaState.start(Settings.EMPTY, nodeEnvironment, xContentRegistry(), persistedStateRegistry()); + delegate = mockGatewayMetaState.getPersistedState(); } else { nodeEnvironment = null; delegate = new InMemoryPersistedState( @@ -864,11 +871,12 @@ class MockPersistedState implements CoordinationState.PersistedState { MockPersistedState( DiscoveryNode newLocalNode, - MockPersistedState oldState, + PersistedStateRegistry persistedStateRegistry, Function adaptGlobalMetadata, Function adaptCurrentTerm ) { try { + MockPersistedState oldState = (MockPersistedState) persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL); if (oldState.nodeEnvironment != null) { nodeEnvironment = oldState.nodeEnvironment; final Metadata updatedMetadata = adaptGlobalMetadata.apply(oldState.getLastAcceptedState().metadata()); @@ -890,7 +898,7 @@ class MockPersistedState implements CoordinationState.PersistedState { } } final MockGatewayMetaState gatewayMetaState = new MockGatewayMetaState(newLocalNode, bigArrays); - gatewayMetaState.start(Settings.EMPTY, nodeEnvironment, xContentRegistry()); + gatewayMetaState.start(Settings.EMPTY, nodeEnvironment, xContentRegistry(), persistedStateRegistry()); delegate = gatewayMetaState.getPersistedState(); } else { nodeEnvironment = null; @@ -1008,6 +1016,11 @@ public void setLastAcceptedState(ClusterState clusterState) { delegate.setLastAcceptedState(clusterState); } + @Override + public PersistedStateStats getStats() { + return null; + } + @Override public void close() { assertTrue(openPersistedStates.remove(this)); @@ -1025,7 +1038,7 @@ class ClusterNode { private final int nodeIndex; Coordinator coordinator; private final DiscoveryNode localNode; - final MockPersistedState persistedState; + final PersistedStateRegistry persistedStateRegistry; final Settings nodeSettings; private AckedFakeThreadPoolClusterManagerService clusterManagerService; private DisruptableClusterApplierService clusterApplierService; @@ -1033,6 +1046,8 @@ class ClusterNode { TransportService transportService; private DisruptableMockTransport mockTransport; private NodeHealthService nodeHealthService; + private RepositoriesService repositoriesService; + private RemoteStoreNodeService remoteStoreNodeService; List> extraJoinValidators = new ArrayList<>(); ClusterNode(int nodeIndex, boolean clusterManagerEligible, Settings nodeSettings, NodeHealthService nodeHealthService) { @@ -1056,7 +1071,9 @@ class ClusterNode { this.nodeIndex = nodeIndex; this.localNode = localNode; this.nodeSettings = nodeSettings; - persistedState = persistedStateSupplier.apply(localNode); + final MockPersistedState persistedState = persistedStateSupplier.apply(localNode); + persistedStateRegistry = persistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, persistedState); assertTrue("must use a fresh PersistedState", openPersistedStates.add(persistedState)); boolean success = false; try { @@ -1105,7 +1122,8 @@ protected Optional getDisruptableMockTransport(Transpo getTransportInterceptor(localNode, threadPool), a -> localNode, null, - emptySet() + emptySet(), + NoopTracer.INSTANCE ); clusterManagerService = new AckedFakeThreadPoolClusterManagerService( localNode.getId(), @@ -1125,6 +1143,15 @@ protected Optional getDisruptableMockTransport(Transpo clusterService.setNodeConnectionsService( new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService) ); + repositoriesService = new 
RepositoriesService( + settings, + clusterService, + transportService, + Collections.emptyMap(), + Collections.emptyMap(), + threadPool + ); + remoteStoreNodeService = new RemoteStoreNodeService(new SetOnce<>(repositoriesService)::get, threadPool); final Collection> onJoinValidators = Collections.singletonList( (dn, cs) -> extraJoinValidators.forEach(validator -> validator.accept(dn, cs)) ); @@ -1144,7 +1171,9 @@ protected Optional getDisruptableMockTransport(Transpo Randomness.get(), (s, p, r) -> {}, getElectionStrategy(), - nodeHealthService + nodeHealthService, + persistedStateRegistry, + remoteStoreNodeService ); clusterManagerService.setClusterStatePublisher(coordinator); final GatewayService gatewayService = new GatewayService( @@ -1204,14 +1233,14 @@ ClusterNode restartedNode( return new ClusterNode( nodeIndex, newLocalNode, - node -> new MockPersistedState(newLocalNode, persistedState, adaptGlobalMetadata, adaptCurrentTerm), + node -> new MockPersistedState(newLocalNode, persistedStateRegistry, adaptGlobalMetadata, adaptCurrentTerm), nodeSettings, nodeHealthService ); } private CoordinationState.PersistedState getPersistedState() { - return persistedState; + return persistedStateRegistry.getPersistedState(PersistedStateType.LOCAL); } String getId() { diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java index 9f7802a401391..cbe695cbb2136 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/CoordinationStateTestCluster.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.PersistedStateRegistry.PersistedStateType; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; @@ -128,6 +129,8 @@ static class ClusterNode { DiscoveryNode localNode; CoordinationState.PersistedState persistedState; + PersistedStateRegistry persistedStateRegistry; + CoordinationState state; ClusterNode(DiscoveryNode localNode, ElectionStrategy electionStrategy) { @@ -143,8 +146,11 @@ static class ClusterNode { 0L ) ); + persistedStateRegistry = new PersistedStateRegistry(); + persistedStateRegistry.addPersistedState(PersistedStateType.LOCAL, persistedState); + this.electionStrategy = electionStrategy; - state = new CoordinationState(localNode, persistedState, electionStrategy); + state = new CoordinationState(localNode, persistedStateRegistry, electionStrategy, Settings.EMPTY); } void reboot() { @@ -183,7 +189,7 @@ void reboot() { localNode.getVersion() ); - state = new CoordinationState(localNode, persistedState, electionStrategy); + state = new CoordinationState(localNode, persistedStateRegistry, electionStrategy, Settings.EMPTY); } void setInitialState(CoordinationMetadata.VotingConfiguration initialConfig, long initialValue) { diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java index 60aacb83e0dc1..946b980bfb62f 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java +++ 
b/test/framework/src/main/java/org/opensearch/cluster/coordination/LinearizabilityChecker.java @@ -476,22 +476,22 @@ void unlift() { /** * A cache optimized for small bit-counts (less than 64) and small number of unique permutations of state objects. - * + *
<p>
            * Each combination of states is kept once only, building on the * assumption that the number of permutations is small compared to the * number of bits permutations. For those histories that are difficult to check * we will have many bits combinations that use the same state permutations. - * + *
<p>
            * The smallMap optimization allows us to avoid object overheads for bit-sets up to 64 bits in size. - * + *
<p>
            * Comparing set of (bits, state) to smallMap: * (bits, state) : 24 (tuple) + 24 (FixedBitSet) + 24 (bits) + 5 (hash buckets) + 24 (hashmap node). * smallMap bits to {state} : 10 (bits) + 5 (hash buckets) + avg-size of unique permutations. - * + *
<p>
            * The avg-size of the unique permutations part is very small compared to the * sometimes large number of bits combinations (which are the cases where * we run into trouble). - * + *
<p>
            * set of (bits, state) totals 101 bytes compared to smallMap bits to { state } * which totals 15 bytes, ie. a 6x improvement in memory usage. */ diff --git a/test/framework/src/main/java/org/opensearch/common/hash/AvalancheStats.java b/test/framework/src/main/java/org/opensearch/common/hash/AvalancheStats.java new file mode 100644 index 0000000000000..c1600abcacd3e --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/common/hash/AvalancheStats.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.hash; + +import java.util.Locale; + +/** + * Represents the avalanche statistics of a hash function. + */ +public class AvalancheStats { + private final int inputBits; + private final int outputBits; + private final double bias; + private final double sumOfSquaredErrors; + + public AvalancheStats(int[][] flips, int iterations) { + this.inputBits = flips.length; + this.outputBits = flips[0].length; + double sumOfBiases = 0; + double sumOfSquaredErrors = 0; + + for (int i = 0; i < inputBits; i++) { + for (int o = 0; o < outputBits; o++) { + sumOfSquaredErrors += Math.pow(0.5 - ((double) flips[i][o] / iterations), 2); + sumOfBiases += 2 * ((double) flips[i][o] / iterations) - 1; + } + } + + this.bias = Math.abs(sumOfBiases / (inputBits * outputBits)); + this.sumOfSquaredErrors = sumOfSquaredErrors; + } + + public double bias() { + return bias; + } + + public double diffusion() { + return 1 - bias; + } + + public double sumOfSquaredErrors() { + return sumOfSquaredErrors; + } + + @Override + public String toString() { + return String.format( + Locale.ROOT, + "AvalancheStats{inputBits=%d, outputBits=%d, bias=%.4f%%, diffusion=%.4f%%, sumOfSquaredErrors=%.2f}", + inputBits, + outputBits, + bias() * 100, + diffusion() * 100, + sumOfSquaredErrors() + ); + } +} diff --git a/test/framework/src/main/java/org/opensearch/common/hash/HashFunctionTestCase.java b/test/framework/src/main/java/org/opensearch/common/hash/HashFunctionTestCase.java new file mode 100644 index 0000000000000..e272fe0962047 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/common/hash/HashFunctionTestCase.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.hash; + +import org.opensearch.common.Randomness; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Arrays; +import java.util.Random; + +/** + * Base class for testing the quality of hash functions. + */ +public abstract class HashFunctionTestCase extends OpenSearchTestCase { + private static final int[] INPUT_BITS = new int[] { 24, 32, 40, 48, 56, 64, 72, 80, 96, 112, 128, 160, 512, 1024 }; + private static final int ITERATIONS = 1000; + private static final double BIAS_THRESHOLD = 0.01; // 1% + + public abstract byte[] hash(byte[] input); + + public abstract int outputBits(); + + /** + * Tests if the hash function shows an avalanche effect, i.e, flipping a single input bit + * should flip half the output bits. 
+ */ + public void testAvalanche() { + for (int inputBits : INPUT_BITS) { + AvalancheStats stats = simulate(inputBits); + if (stats.bias() >= BIAS_THRESHOLD) { + fail("bias exceeds threshold: " + stats); + } + } + } + + private AvalancheStats simulate(int inputBits) { + int outputBits = outputBits(); + assert inputBits % 8 == 0; // using full bytes for simplicity + assert outputBits % 8 == 0; // using full bytes for simplicity + byte[] input = new byte[inputBits >>> 3]; + Random random = Randomness.get(); + int[][] flips = new int[inputBits][outputBits]; + + for (int iter = 0; iter < ITERATIONS; iter++) { + random.nextBytes(input); + byte[] hash = Arrays.copyOf(hash(input), outputBits >>> 3); // copying since the underlying byte-array is reused + + for (int i = 0; i < inputBits; i++) { + flipBit(input, i); // flip one bit + byte[] newHash = hash(input); // recompute the hash; half the bits should have flipped + flipBit(input, i); // return to original + + for (int o = 0; o < outputBits; o++) { + flips[i][o] += getBit(hash, o) ^ getBit(newHash, o); + } + } + } + + return new AvalancheStats(flips, ITERATIONS); + } + + private static void flipBit(byte[] input, int position) { + int offset = position / 8; + int bit = position & 7; + input[offset] ^= (1 << bit); + } + + private static int getBit(byte[] input, int position) { + int offset = position / 8; + int bit = position & 7; + return (input[offset] >>> bit) & 1; + } +} diff --git a/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsIntegTestCase.java b/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsIntegTestCase.java index e4774030f21cb..5f4d92d65548b 100644 --- a/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/common/logging/JsonLogsIntegTestCase.java @@ -52,11 +52,11 @@ * Fields available upon process startup: type, timestamp, level, component, * message, node.name, cluster.name. * Whereas node.id and cluster.uuid are available later once the first clusterState has been received. - * + *
<p>
            * * node.name, cluster.name, node.id, cluster.uuid * should not change across all log lines - * + *
<p>
            * Note that this won't pass for nodes in clusters that don't have the node name defined in opensearch.yml and start * with DEBUG or TRACE level logging. Those nodes log a few lines before the node.name is set by LogConfigurator.setNodeName. */ diff --git a/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java b/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java index 6a3748e55394e..2f006a5519d69 100644 --- a/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java +++ b/test/framework/src/main/java/org/opensearch/gateway/MockGatewayMetaState.java @@ -33,6 +33,7 @@ package org.opensearch.gateway; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.metadata.Manifest; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.MetadataIndexUpgradeService; @@ -44,6 +45,8 @@ import org.opensearch.common.util.BigArrays; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.NodeEnvironment; +import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.index.recovery.RemoteStoreRestoreService; import org.opensearch.plugins.MetadataUpgrader; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -62,10 +65,32 @@ public class MockGatewayMetaState extends GatewayMetaState { private final DiscoveryNode localNode; private final BigArrays bigArrays; + private final RemoteClusterStateService remoteClusterStateService; + private final RemoteStoreRestoreService remoteStoreRestoreService; + private boolean prepareFullState = false; + + public MockGatewayMetaState(DiscoveryNode localNode, BigArrays bigArrays, boolean prepareFullState) { + this(localNode, bigArrays); + this.prepareFullState = prepareFullState; + } public MockGatewayMetaState(DiscoveryNode localNode, BigArrays bigArrays) { this.localNode = localNode; this.bigArrays = bigArrays; + this.remoteClusterStateService = mock(RemoteClusterStateService.class); + this.remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + } + + public MockGatewayMetaState( + DiscoveryNode localNode, + BigArrays bigArrays, + RemoteClusterStateService remoteClusterStateService, + RemoteStoreRestoreService remoteStoreRestoreService + ) { + this.localNode = localNode; + this.bigArrays = bigArrays; + this.remoteClusterStateService = remoteClusterStateService; + this.remoteStoreRestoreService = remoteStoreRestoreService; } @Override @@ -80,11 +105,35 @@ Metadata upgradeMetadataForNode( @Override ClusterState prepareInitialClusterState(TransportService transportService, ClusterService clusterService, ClusterState clusterState) { - // Just set localNode here, not to mess with ClusterService and IndicesService mocking - return ClusterStateUpdaters.setLocalNode(clusterState, localNode); + if (prepareFullState) { + return super.prepareInitialClusterState(transportService, clusterService, clusterState); + } else { + // Just set localNode here, not to mess with ClusterService and IndicesService mocking + return ClusterStateUpdaters.setLocalNode(clusterState, localNode); + } } - public void start(Settings settings, NodeEnvironment nodeEnvironment, NamedXContentRegistry xContentRegistry) { + @Override + public void close() throws IOException { + super.close(); + } + + public void start( + Settings settings, + NodeEnvironment nodeEnvironment, + NamedXContentRegistry 
xContentRegistry, + PersistedStateRegistry persistedStateRegistry + ) { + start(settings, nodeEnvironment, xContentRegistry, persistedStateRegistry, false); + } + + public void start( + Settings settings, + NodeEnvironment nodeEnvironment, + NamedXContentRegistry xContentRegistry, + PersistedStateRegistry persistedStateRegistry, + boolean prepareFullState + ) { final TransportService transportService = mock(TransportService.class); when(transportService.getThreadPool()).thenReturn(mock(ThreadPool.class)); final ClusterService clusterService = mock(ClusterService.class); @@ -97,6 +146,7 @@ public void start(Settings settings, NodeEnvironment nodeEnvironment, NamedXCont } catch (IOException e) { throw new AssertionError(e); } + this.prepareFullState = prepareFullState; start( settings, transportService, @@ -110,7 +160,10 @@ public void start(Settings settings, NodeEnvironment nodeEnvironment, NamedXCont bigArrays, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L - ) + ), + remoteClusterStateService, + persistedStateRegistry, + remoteStoreRestoreService ); } } diff --git a/test/framework/src/main/java/org/opensearch/index/MockEngineFactoryPlugin.java b/test/framework/src/main/java/org/opensearch/index/MockEngineFactoryPlugin.java index 72d34676850b9..0a47db4c740d6 100644 --- a/test/framework/src/main/java/org/opensearch/index/MockEngineFactoryPlugin.java +++ b/test/framework/src/main/java/org/opensearch/index/MockEngineFactoryPlugin.java @@ -46,7 +46,7 @@ /** * A plugin to use {@link MockEngineFactory}. - * + *
<p>
            * Subclasses may override the reader wrapper used. */ public class MockEngineFactoryPlugin extends Plugin implements EnginePlugin { diff --git a/test/framework/src/main/java/org/opensearch/index/RandomCreateIndexGenerator.java b/test/framework/src/main/java/org/opensearch/index/RandomCreateIndexGenerator.java index f016d9450425d..5ab77783b2bac 100644 --- a/test/framework/src/main/java/org/opensearch/index/RandomCreateIndexGenerator.java +++ b/test/framework/src/main/java/org/opensearch/index/RandomCreateIndexGenerator.java @@ -56,7 +56,7 @@ private RandomCreateIndexGenerator() {} /** * Returns a random {@link CreateIndexRequest}. - * + *
<p>
            * Randomizes the index name, the aliases, mappings and settings associated with the * index. If present, the mapping definition will be nested under a type name. */ diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index 018bc675c069b..d898c81159087 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -527,7 +527,7 @@ protected Translog createTranslog(LongSupplier primaryTermSupplier) throws IOExc } protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSupplier) throws IOException { - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE); + TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE, ""); String translogUUID = Translog.createEmptyTranslog( translogPath, SequenceNumbers.NO_OPS_PERFORMED, @@ -840,10 +840,45 @@ public EngineConfig config( final @Nullable Supplier maybeRetentionLeasesSupplier, final CircuitBreakerService breakerService ) { - final IndexWriterConfig iwc = newIndexWriterConfig(); - final TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); final Engine.EventListener eventListener = new Engine.EventListener() { }; // we don't need to notify anybody in this test + + return config( + indexSettings, + store, + translogPath, + mergePolicy, + externalRefreshListener, + internalRefreshListener, + indexSort, + maybeGlobalCheckpointSupplier, + maybeGlobalCheckpointSupplier == null ? null : () -> RetentionLeases.EMPTY, + breakerService, + eventListener + ); + } + + public EngineConfig config( + final IndexSettings indexSettings, + final Store store, + final Path translogPath, + final MergePolicy mergePolicy, + final ReferenceManager.RefreshListener externalRefreshListener, + final ReferenceManager.RefreshListener internalRefreshListener, + final Sort indexSort, + final @Nullable LongSupplier maybeGlobalCheckpointSupplier, + final @Nullable Supplier maybeRetentionLeasesSupplier, + final CircuitBreakerService breakerService, + final Engine.EventListener eventListener + ) { + final IndexWriterConfig iwc = newIndexWriterConfig(); + final TranslogConfig translogConfig = new TranslogConfig( + shardId, + translogPath, + indexSettings, + BigArrays.NON_RECYCLING_INSTANCE, + "" + ); final List extRefreshListenerList = externalRefreshListener == null ? 
emptyList() : Collections.singletonList(externalRefreshListener); @@ -910,7 +945,7 @@ protected EngineConfig config( .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .build() ); - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); + TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE, ""); return new EngineConfig.Builder().shardId(config.getShardId()) .threadPool(config.getThreadPool()) .indexSettings(indexSettings) diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase.java index 83bb2f1ee7d65..77137073aa30f 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase.java @@ -149,7 +149,7 @@ protected Set unsupportedProperties() { /** * Add type-specific modifiers for consistency checking. - * + *
<p>
            * This should be called in a {@code @Before} method */ protected void addModifier(String property, boolean updateable, BiConsumer method) { @@ -158,7 +158,7 @@ protected void addModifier(String property, boolean updateable, BiConsumer /** * Add type-specific modifiers for consistency checking. - * + *
<p>
            * This should be called in a {@code @Before} method */ protected void addBooleanModifier(String property, boolean updateable, BiConsumer method) { diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase2.java b/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase2.java index 1987a49431bf9..5dfb2f16a1aae 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase2.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/FieldMapperTestCase2.java @@ -139,7 +139,7 @@ protected Set unsupportedProperties() { /** * Add type-specific modifiers for consistency checking. - * + *
<p>
            * This should be called in a {@code @Before} method */ protected void addModifier(String property, boolean updateable, BiConsumer method) { @@ -148,7 +148,7 @@ protected void addModifier(String property, boolean updateable, BiConsumer /** * Add type-specific modifiers for consistency checking. - * + *
<p>
            * This should be called in a {@code @Before} method */ protected void addBooleanModifier(String property, boolean updateable, BiConsumer method) { diff --git a/test/framework/src/main/java/org/opensearch/index/replication/TestReplicationSource.java b/test/framework/src/main/java/org/opensearch/index/replication/TestReplicationSource.java index b29e25a0bff2c..bcd47e3d578ee 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/TestReplicationSource.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/TestReplicationSource.java @@ -17,6 +17,7 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import java.util.List; +import java.util.function.BiConsumer; /** * This class is used by unit tests implementing SegmentReplicationSource @@ -36,6 +37,7 @@ public abstract void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 8348584379f9c..412d5235fe462 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -97,7 +97,8 @@ import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.SourceToParse; -import org.opensearch.index.remote.RemoteStorePressureService; +import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.replication.TestReplicationSource; import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLeaseSyncer; @@ -118,6 +119,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.indices.recovery.AsyncRecoveryTarget; +import org.opensearch.indices.recovery.DefaultRecoverySettings; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryFailedException; import org.opensearch.indices.recovery.RecoveryResponse; @@ -172,6 +174,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; @@ -271,11 +274,11 @@ public Settings threadPoolSettings() { } protected Store createStore(IndexSettings indexSettings, ShardPath shardPath) throws IOException { - return createStore(shardPath.getShardId(), indexSettings, newFSDirectory(shardPath.resolveIndex())); + return createStore(shardPath.getShardId(), indexSettings, newFSDirectory(shardPath.resolveIndex()), shardPath); } - protected Store createStore(ShardId shardId, IndexSettings indexSettings, Directory directory) throws IOException { - return new Store(shardId, indexSettings, directory, new DummyShardLock(shardId)); + protected Store createStore(ShardId shardId, IndexSettings indexSettings, Directory directory, ShardPath shardPath) throws IOException { + return new Store(shardId, indexSettings, directory, new DummyShardLock(shardId), Store.OnClose.EMPTY, shardPath); } protected 
Releasable acquirePrimaryOperationPermitBlockingly(IndexShard indexShard) throws ExecutionException, InterruptedException { @@ -639,8 +642,8 @@ protected IndexShard newShard( Collections.emptyList(), clusterSettings ); - Store remoteStore = null; - RemoteStorePressureService remoteStorePressureService = null; + Store remoteStore; + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory = null; RepositoriesService mockRepoSvc = mock(RepositoriesService.class); if (indexSettings.isRemoteStoreEnabled()) { @@ -653,20 +656,22 @@ protected IndexShard newShard( remotePath = createTempDir(); } - remoteStore = createRemoteStore(remotePath, routing, indexMetadata); + remoteStore = createRemoteStore(remotePath, routing, indexMetadata, shardPath); - remoteStorePressureService = new RemoteStorePressureService(clusterService, indexSettings.getSettings()); + remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, indexSettings.getSettings()); BlobStoreRepository repo = createRepository(remotePath); when(mockRepoSvc.repository(any())).thenAnswer(invocationOnMock -> repo); + } else { + remoteStore = null; } final BiFunction translogFactorySupplier = (settings, shardRouting) -> { if (settings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { - return new RemoteBlobStoreInternalTranslogFactory( () -> mockRepoSvc, threadPool, - settings.getRemoteStoreTranslogRepository() + settings.getRemoteStoreTranslogRepository(), + new RemoteTranslogTransferTracker(shardRouting.shardId(), 20) ); } return new InternalTranslogFactory(); @@ -695,11 +700,14 @@ protected IndexShard newShard( translogFactorySupplier, checkpointPublisher, remoteStore, - remoteStorePressureService + remoteStoreStatsTrackerFactory, + () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + "dummy-node", + DefaultRecoverySettings.INSTANCE ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); - if (remoteStorePressureService != null) { - remoteStorePressureService.afterIndexShardCreated(indexShard); + if (remoteStoreStatsTrackerFactory != null) { + remoteStoreStatsTrackerFactory.afterIndexShardCreated(indexShard); } success = true; } finally { @@ -766,11 +774,12 @@ protected RepositoriesService createRepositoriesService() { return repositoriesService; } - protected Store createRemoteStore(Path path, ShardRouting shardRouting, IndexMetadata metadata) throws IOException { + protected Store createRemoteStore(Path path, ShardRouting shardRouting, IndexMetadata metadata, ShardPath shardPath) + throws IOException { Settings nodeSettings = Settings.builder().put("node.name", shardRouting.currentNodeId()).build(); ShardId shardId = shardRouting.shardId(); RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = createRemoteSegmentStoreDirectory(shardId, path); - return createStore(shardId, new IndexSettings(metadata, nodeSettings), remoteSegmentStoreDirectory); + return createStore(shardId, new IndexSettings(metadata, nodeSettings), remoteSegmentStoreDirectory, shardPath); } protected RemoteSegmentStoreDirectory createRemoteSegmentStoreDirectory(ShardId shardId, Path path) throws IOException { @@ -781,7 +790,7 @@ protected RemoteSegmentStoreDirectory createRemoteSegmentStoreDirectory(ShardId RemoteStoreLockManager remoteStoreLockManager = new RemoteStoreMetadataLockManager( new RemoteBufferedOutputDirectory(getBlobContainer(remoteShardPath.resolveIndex())) ); - return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, remoteStoreLockManager, threadPool); + return new 
RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, remoteStoreLockManager, threadPool, shardId); } private RemoteDirectory newRemoteDirectory(Path f) throws IOException { @@ -1078,7 +1087,7 @@ protected void recoverReplica( /** * Recovers a replica from the give primary, allow the user to supply a custom recovery target. A typical usage of a custom recovery * target is to assert things in the various stages of recovery. - * + *
<p>
            * Note: this method keeps the shard in {@link IndexShardState#POST_RECOVERY} and doesn't start it. * * @param replica the recovery target shard @@ -1612,6 +1621,7 @@ public void getSegmentFiles( ReplicationCheckpoint checkpoint, List filesToFetch, IndexShard indexShard, + BiConsumer fileProgressTracker, ActionListener listener ) { try ( diff --git a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java index a717d621ca5f3..7e25ed97a7ea1 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java @@ -154,6 +154,16 @@ public long getRestoreThrottleTimeInNanos() { return 0; } + @Override + public long getRemoteUploadThrottleTimeInNanos() { + return 0; + } + + @Override + public long getRemoteDownloadThrottleTimeInNanos() { + return 0; + } + @Override public String startVerification() { return null; @@ -167,6 +177,11 @@ public boolean isReadOnly() { return false; } + @Override + public boolean isSystemRepository() { + return false; + } + @Override public void snapshotShard( Store store, diff --git a/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java index b93cb64e32cfe..c412ae8317f24 100644 --- a/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java @@ -98,6 +98,7 @@ public abstract class AnalysisFactoryTestCase extends OpenSearchTestCase { .put("czechstem", MovedToAnalysisCommon.class) .put("decimaldigit", MovedToAnalysisCommon.class) .put("delimitedpayload", MovedToAnalysisCommon.class) + .put("delimitedtermfrequency", MovedToAnalysisCommon.class) .put("dictionarycompoundword", MovedToAnalysisCommon.class) .put("edgengram", MovedToAnalysisCommon.class) .put("elision", MovedToAnalysisCommon.class) @@ -201,9 +202,6 @@ public abstract class AnalysisFactoryTestCase extends OpenSearchTestCase { .put("daterecognizer", Void.class) // for token filters that generate bad offsets, which are now rejected since Lucene 7 .put("fixbrokenoffsets", Void.class) - // should we expose it, or maybe think about higher level integration of the - // fake term frequency feature (LUCENE-7854) - .put("delimitedtermfrequency", Void.class) // LUCENE-8273: ProtectedTermFilterFactory allows analysis chains to skip // particular token filters based on the attributes of the current token. .put("protectedterm", Void.class) diff --git a/test/framework/src/main/java/org/opensearch/indices/recovery/DefaultRecoverySettings.java b/test/framework/src/main/java/org/opensearch/indices/recovery/DefaultRecoverySettings.java new file mode 100644 index 0000000000000..359668f5dad71 --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/indices/recovery/DefaultRecoverySettings.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.recovery; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; + +/** + * Utility to provide a {@link RecoverySettings} instance containing all defaults + */ +public final class DefaultRecoverySettings { + private DefaultRecoverySettings() {} + + public static final RecoverySettings INSTANCE = new RecoverySettings( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); +} diff --git a/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java b/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java index ef13e065968df..0f5e043ee1135 100644 --- a/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java +++ b/test/framework/src/main/java/org/opensearch/mockito/plugin/PriviledgedMockMaker.java @@ -43,7 +43,7 @@ public class PriviledgedMockMaker implements MockMaker { * since Mockito does not support SecurityManager out of the box. The method has to be called by * test framework before the SecurityManager is being set, otherwise additional permissions have * to be granted to the caller: - * + *
<p>
            * permission java.security.Permission "createAccessControlContext" * */ diff --git a/test/framework/src/main/java/org/opensearch/node/MockNode.java b/test/framework/src/main/java/org/opensearch/node/MockNode.java index 803a613ba55ff..e6c7e21d5b3ea 100644 --- a/test/framework/src/main/java/org/opensearch/node/MockNode.java +++ b/test/framework/src/main/java/org/opensearch/node/MockNode.java @@ -60,6 +60,7 @@ import org.opensearch.search.SearchService; import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.query.QueryPhase; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.test.MockHttpTransport; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.ThreadPool; @@ -199,16 +200,35 @@ protected TransportService newTransportService( TransportInterceptor interceptor, Function localNodeFactory, ClusterSettings clusterSettings, - Set taskHeaders + Set taskHeaders, + Tracer tracer ) { // we use the MockTransportService.TestPlugin class as a marker to create a network // module with this MockNetworkService. NetworkService is such an integral part of the systme // we don't allow to plug it in from plugins or anything. this is a test-only override and // can't be done in a production env. if (getPluginsService().filterPlugins(MockTransportService.TestPlugin.class).isEmpty()) { - return super.newTransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders); + return super.newTransportService( + settings, + transport, + threadPool, + interceptor, + localNodeFactory, + clusterSettings, + taskHeaders, + tracer + ); } else { - return new MockTransportService(settings, transport, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders); + return new MockTransportService( + settings, + transport, + threadPool, + interceptor, + localNodeFactory, + clusterSettings, + taskHeaders, + tracer + ); } } diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java index 4d23e2aecc118..507a100c94e0d 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java @@ -165,6 +165,27 @@ public void testWriteRead() throws IOException { } } + public void testReadRange() throws IOException { + try (BlobStore store = newBlobStore()) { + final BlobContainer container = store.blobContainer(new BlobPath()); + final byte[] data = randomBytes(4096); + + // Pick a subrange starting somewhere between position 100 and 1000 + // and ending somewhere between 100 bytes past that position and + // 100 bytes before the end + final int startOffset = randomIntBetween(100, 1000); + final int endOffset = randomIntBetween(startOffset + 100, data.length - 100); + final byte[] subrangeData = Arrays.copyOfRange(data, startOffset, endOffset); + + writeBlob(container, "foobar", new BytesArray(data), randomBoolean()); + try (InputStream stream = container.readBlob("foobar", startOffset, subrangeData.length)) { + final byte[] actual = stream.readAllBytes(); + assertArrayEquals(subrangeData, actual); + } + container.delete(); + } + } + public void testList() throws IOException { try (BlobStore store = newBlobStore()) { final 
BlobContainer container = store.blobContainer(new BlobPath()); diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java index 9ced3f960f02f..13972ec254ca7 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java @@ -238,7 +238,7 @@ public void testRequestStats() throws Exception { assertEquals(assertionErrorMsg, mockCalls, sdkRequestCounts); } - private Map getMockRequestCounts() { + protected Map getMockRequestCounts() { for (HttpHandler h : handlers.values()) { while (h instanceof DelegatingHttpHandler) { if (h instanceof HttpStatsCollectorHandler) { @@ -265,7 +265,7 @@ protected static void drainInputStream(final InputStream inputStream) throws IOE /** * HTTP handler that injects random service errors - * + *
<p>
            * Note: it is not a good idea to allow this handler to simulate too many errors as it would * slow down the test suite. */ @@ -339,7 +339,7 @@ public interface DelegatingHttpHandler extends HttpHandler { /** * HTTP handler that allows collecting request stats per request type. - * + *
<p>
            * Implementors should keep track of the desired requests on {@link #maybeTrack(String, Headers)}. */ @SuppressForbidden(reason = "this test uses a HttpServer to emulate a cloud-based storage service") @@ -377,7 +377,7 @@ public void handle(HttpExchange exchange) throws IOException { /** * Tracks the given request if it matches the criteria. - * + *
<p>
            * The request is represented as: * Request = Method SP Request-URI * diff --git a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java index cb0614ddeb808..83b245a1bcecb 100644 --- a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java @@ -58,14 +58,14 @@ /** * A mocked script engine that can be used for testing purpose. - * + *
<p>
            * This script engine allows defining a set of predefined scripts that are basically a combination of a key and a * function: - * + *
<p>
            * The key can be anything as long as it is a {@link String} and is used to resolve the scripts * at compilation time. For inline scripts, the key can be a description of the script. For stored and file scripts, * the source must match a key in the predefined set of scripts. - * + *
<p>
            * The function is used to provide the result of the script execution and can return anything. */ public class MockScriptEngine implements ScriptEngine { diff --git a/test/framework/src/main/java/org/opensearch/script/ScoreAccessor.java b/test/framework/src/main/java/org/opensearch/script/ScoreAccessor.java index 26b439fa6438f..9cf2141555957 100644 --- a/test/framework/src/main/java/org/opensearch/script/ScoreAccessor.java +++ b/test/framework/src/main/java/org/opensearch/script/ScoreAccessor.java @@ -39,7 +39,7 @@ /** * A float encapsulation that dynamically accesses the score of a document. - * + *
<p>
            * The provided {@link DocLookup} is used to retrieve the score * for the current document. */ diff --git a/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java index b942136e1f1e2..74de1e6d96d93 100644 --- a/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java +++ b/test/framework/src/main/java/org/opensearch/search/RandomSearchRequestGenerator.java @@ -131,6 +131,9 @@ public static SearchRequest randomSearchRequest(Supplier ra if (randomBoolean()) { searchRequest.setCancelAfterTimeInterval(TimeValue.parseTimeValue(randomTimeValue(), null, "cancel_after_time_interval")); } + if (randomBoolean()) { + searchRequest.setPhaseTook(randomBoolean()); + } return searchRequest; } diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index 3a6147850f090..82f15a590bea6 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -685,7 +685,7 @@ protected static IndexReader maybeWrapReaderEs(DirectoryReader reader) throws IO * Implementors should return a list of {@link ValuesSourceType} that the aggregator supports. * This is used to test the matrix of supported/unsupported field types against the aggregator * and verify it works (or doesn't) as expected. - * + *
<p>
            * If this method is implemented, {@link AggregatorTestCase#createAggBuilderForTypeTest(MappedFieldType, String)} * should be implemented as well. * @@ -702,7 +702,7 @@ protected List getSupportedValuesSourceTypes() { * The field type and name are provided, and the implementor is expected to return an AggBuilder accordingly. * The AggBuilder should be returned even if the aggregation does not support the field type, because * the test will check if an exception is thrown in that case. - * + *
<p>
            * The list of supported types is provided by {@link AggregatorTestCase#getSupportedValuesSourceTypes()}, * which must also be implemented. * @@ -720,7 +720,7 @@ protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldTy /** * A method that allows implementors to specifically denylist particular field types (based on their content_name). * This is needed in some areas where the ValuesSourceType is not granular enough, for example integer values * vs floating points, or `keyword` bytes vs `binary` bytes (which are not searchable) - * + *
<p>
            * This is a denylist instead of an allowlist because there are vastly more field types than ValuesSourceTypes, * and it's expected that these unsupported cases are exceptional rather than common */ @@ -734,7 +734,7 @@ protected List unsupportedMappedFieldTypes() { * is provided by the implementor class, and it is executed against each field type in turn. If * an exception is thrown when the field is supported, that will fail the test. Similarly, if * an exception _is not_ thrown when a field is unsupported, that will also fail the test. - * + *
<p>
            * Exception types/messages are not currently checked, just presence/absence of an exception. */ public void testSupportedFieldTypes() throws IOException { @@ -825,7 +825,7 @@ private ValuesSourceType fieldToVST(MappedFieldType fieldType) { /** * Helper method to write a single document with a single value specific to the requested fieldType. - * + *
<p>
            * Throws an exception if it encounters an unknown field type, to prevent new ones from sneaking in without * being tested. */ @@ -1059,6 +1059,11 @@ public InternalAggregation buildEmptyAggregation() { } }; } + + @Override + protected boolean supportsConcurrentSegmentSearch() { + return true; + } }; } diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/bucket/AbstractTermsTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/bucket/AbstractTermsTestCase.java index 75192e276982e..8e94f2cacf070 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/bucket/AbstractTermsTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/bucket/AbstractTermsTestCase.java @@ -32,16 +32,41 @@ package org.opensearch.search.aggregations.bucket; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.opensearch.search.aggregations.bucket.terms.Terms; import org.opensearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode; -import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; -public abstract class AbstractTermsTestCase extends OpenSearchIntegTestCase { +public abstract class AbstractTermsTestCase extends ParameterizedOpenSearchIntegTestCase { + + public AbstractTermsTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } public String randomExecutionHint() { return randomBoolean() ? null : randomFrom(ExecutionMode.values()).toString(); diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java index 1b348dc7d41a7..6b5ec838f401d 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/composite/BaseCompositeAggregatorTestCase.java @@ -305,6 +305,6 @@ protected static Map createAfterKey(Object... 
fields) { } protected static long asLong(String dateTime) { - return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); + return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/metrics/AbstractNumericTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/metrics/AbstractNumericTestCase.java index a4f6b97115bb0..103b67e2782de 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/metrics/AbstractNumericTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/metrics/AbstractNumericTestCase.java @@ -31,18 +31,43 @@ package org.opensearch.search.aggregations.metrics; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; @OpenSearchIntegTestCase.SuiteScopeTestCase -public abstract class AbstractNumericTestCase extends OpenSearchIntegTestCase { +public abstract class AbstractNumericTestCase extends ParameterizedOpenSearchIntegTestCase { protected static long minValue, maxValue, minValues, maxValues; + public AbstractNumericTestCase(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index b04e71d0fca52..65db86e14fd91 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -331,6 +331,12 @@ public static void blockNodeOnAnyFiles(String repository, String nodeName) { ); } + public static void blockNodeOnAnySegmentFile(String repository, String nodeName) { + ((MockRepository) internalCluster().getInstance(RepositoriesService.class, nodeName).repository(repository)).blockOnSegmentFiles( + true + ); + } + public static void blockDataNode(String repository, String nodeName) { ((MockRepository) internalCluster().getInstance(RepositoriesService.class, nodeName).repository(repository)).blockOnDataFiles(true); } @@ -558,6 +564,11 @@ protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String rem .prepareGetSettings(remoteStoreIndex) .get() 
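A concrete suite on top of these parameterized base classes only has to forward the injected settings to the super constructor; each test method then runs once per parameter set (concurrent segment search off, then on). A minimal sketch with a hypothetical suite name, assuming the "idx"/"value" fixture that AbstractNumericTestCase's suite setup indexes:

import org.opensearch.action.search.SearchResponse;
import org.opensearch.common.settings.Settings;
import org.opensearch.search.aggregations.metrics.AbstractNumericTestCase;

import static org.opensearch.search.aggregations.AggregationBuilders.min;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;

// Hypothetical suite: the inherited @ParametersFactory supplies the Settings, which the
// base class applies as persistent cluster settings around each test.
public class MinAggregationIT extends AbstractNumericTestCase {

    public MinAggregationIT(Settings dynamicSettings) {
        super(dynamicSettings);
    }

    public void testMinOverAllDocs() {
        SearchResponse response = client().prepareSearch("idx").addAggregation(min("min").field("value")).get();
        assertSearchResponse(response);
    }
}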
.getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID); + return getLockFilesInRemoteStore(remoteStoreIndex, remoteStoreRepositoryName, indexUUID); + } + + protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepositoryName, String indexUUID) + throws IOException { final RepositoriesService repositoriesService = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class); final BlobStoreRepository remoteStoreRepository = (BlobStoreRepository) repositoriesService.repository(remoteStoreRepositoryName); BlobPath shardLevelBlobPath = remoteStoreRepository.basePath().add(indexUUID).add("0").add("segments").add("lock_files"); diff --git a/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java b/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java index 7db71c4be0968..72c4ba44d0a31 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/mockstore/MockRepository.java @@ -139,6 +139,8 @@ public long getFailureCount() { private volatile boolean blockOnDataFiles; + private volatile boolean blockOnSegmentFiles; + private volatile boolean blockOnDeleteIndexN; /** @@ -190,6 +192,7 @@ public MockRepository( maximumNumberOfFailures = metadata.settings().getAsLong("max_failure_number", 100L); blockOnAnyFiles = metadata.settings().getAsBoolean("block_on_control", false); blockOnDataFiles = metadata.settings().getAsBoolean("block_on_data", false); + blockOnSegmentFiles = metadata.settings().getAsBoolean("block_on_segment", false); blockAndFailOnWriteSnapFile = metadata.settings().getAsBoolean("block_on_snap", false); randomPrefix = metadata.settings().get("random", "default"); waitAfterUnblock = metadata.settings().getAsLong("wait_after_unblock", 0L); @@ -237,6 +240,7 @@ public synchronized void unblock() { blocked = false; // Clean blocking flags, so we wouldn't try to block again blockOnDataFiles = false; + blockOnSegmentFiles = false; blockOnAnyFiles = false; blockAndFailOnWriteIndexFile = false; blockOnWriteIndexFile = false; @@ -259,6 +263,14 @@ public void setBlockOnAnyFiles(boolean blocked) { blockOnAnyFiles = blocked; } + public void blockOnSegmentFiles(boolean blocked) { + blockOnSegmentFiles = blocked; + } + + public void setBlockOnSegmentFiles(boolean blocked) { + blockOnSegmentFiles = blocked; + } + public void setBlockAndFailOnWriteSnapFiles(boolean blocked) { blockAndFailOnWriteSnapFile = blocked; } @@ -306,6 +318,7 @@ private synchronized boolean blockExecution() { boolean wasBlocked = false; try { while (blockOnDataFiles + || blockOnSegmentFiles || blockOnAnyFiles || blockAndFailOnWriteIndexFile || blockOnWriteIndexFile @@ -407,6 +420,8 @@ private void maybeIOExceptionOrBlock(String blobName) throws IOException { blockExecutionAndMaybeWait(blobName); } else if (blobName.startsWith("snap-") && blockAndFailOnWriteSnapFile) { blockExecutionAndFail(blobName); + } else if (blockOnSegmentFiles && blobName.contains(".si__")) { + blockExecutionAndMaybeWait(blobName); } } } diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractBuilderTestCase.java index 8a508a2115210..656397d7673d6 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractBuilderTestCase.java @@ -35,6 +35,7 @@ import 
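A rough sketch of how a snapshot test might drive the new segment-file block. The repository and snapshot names are illustrative, and waitForBlock/unblockNode are pre-existing helpers on this base class (their exact signatures are an assumption here):

// Inside a test method of an AbstractSnapshotIntegTestCase subclass (declared to throw Exception).
// Stall the data node the first time it writes a segment blob (a name containing ".si__"),
// inspect the half-finished snapshot, then release the node.
String dataNode = internalCluster().startDataOnlyNode();
blockNodeOnAnySegmentFile("test-repo", dataNode);

ActionFuture<CreateSnapshotResponse> snapshot = client().admin()
    .cluster()
    .prepareCreateSnapshot("test-repo", "snap")
    .setWaitForCompletion(true)
    .execute();

waitForBlock(dataNode, "test-repo", TimeValue.timeValueSeconds(30));
// ... assertions against the in-progress snapshot go here ...
unblockNode("test-repo", dataNode); // unblock() also clears blockOnSegmentFiles
assertEquals(SnapshotState.SUCCESS, snapshot.actionGet().getSnapshotInfo().state());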
com.carrotsearch.randomizedtesting.RandomizedTest; import com.carrotsearch.randomizedtesting.SeedUtils; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.Accountable; import org.opensearch.Version; @@ -69,6 +70,8 @@ import org.opensearch.index.fielddata.IndexFieldDataService; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.indices.IndicesModule; @@ -316,6 +319,20 @@ protected static QueryShardContext createShardContext() { return createShardContext(null); } + protected static QueryBuilderVisitor createTestVisitor(List<QueryBuilder> visitedQueries) { + return new QueryBuilderVisitor() { + @Override + public void accept(QueryBuilder qb) { + visitedQueries.add(qb); + } + + @Override + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + return this; + } + }; + } + private static class ClientInvocationHandler implements InvocationHandler { AbstractBuilderTestCase delegate; diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java index f5d358a162bd1..10e782e6af8da 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractDiffableSerializationTestCase.java @@ -41,8 +41,8 @@ /** * An abstract test case to ensure correct behavior of Diffable. - * - * This class can be used as a based class for tests of Metadata.Custom classes and other classes that support, + *
<p>
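The createTestVisitor helper added above records every QueryBuilder it is offered and returns itself for child clauses, so an entire query tree flattens into a single list. A sketch of how a builder test might use it; the bool query is illustrative:

import java.util.ArrayList;
import java.util.List;

import org.opensearch.index.query.BoolQueryBuilder;
import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.QueryBuilders;

// Collect every builder in the tree: the bool builder accepts itself, then hands
// the same visitor to its clauses, so three entries are expected.
List<QueryBuilder> visited = new ArrayList<>();
BoolQueryBuilder query = QueryBuilders.boolQuery()
    .must(QueryBuilders.termQuery("user", "kimchy"))
    .filter(QueryBuilders.rangeQuery("age").gte(18));
query.visit(createTestVisitor(visited));
assertEquals(3, visited.size());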
+ * This class can be used as a base class for tests of Metadata.Custom classes and other classes that support {@link org.opensearch.core.common.io.stream.Writeable } serialization, XContent-based serialization and is diffable. * Writable serialization, XContent-based serialization and is diffable. */ public abstract class AbstractDiffableSerializationTestCase<T extends Diffable<T> & ToXContent> extends AbstractSerializingTestCase<T> { diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java index ff7c39cd8f102..3f97f0704d733 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractDiffableWireSerializationTestCase.java @@ -40,8 +40,8 @@ /** * An abstract test case to ensure correct behavior of Diffable. - * - * This class can be used as a based class for tests of ClusterState.Custom classes and other classes that support, + *
<p>
+ * This class can be used as a base class for tests of ClusterState.Custom classes and other classes that support {@link org.opensearch.core.common.io.stream.Writeable } serialization and is diffable. * Writable serialization and is diffable. */ public abstract class AbstractDiffableWireSerializationTestCase<T extends Diffable<T>> extends AbstractWireSerializingTestCase<T> { diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java index 47fe85d28975f..afd93e1b72fbb 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractQueryTestCase.java @@ -213,7 +213,7 @@ public void testUnknownObjectException() throws IOException { /** * Traverses the json tree of the valid query provided as argument and mutates it one or more times by adding one object within each * object encountered. - * + *
<p>
            * For instance given the following valid term query: * { * "term" : { @@ -222,7 +222,7 @@ public void testUnknownObjectException() throws IOException { * } * } * } - * + *
<p>
            * The following two mutations will be generated, and an exception is expected when trying to parse them: * { * "term" : { @@ -233,7 +233,7 @@ public void testUnknownObjectException() throws IOException { * } * } * } - * + *
<p>
            * { * "term" : { * "field" : { @@ -243,7 +243,7 @@ public void testUnknownObjectException() throws IOException { * } * } * } - * + *
<p>
* Every mutation is then added to the list of results with a boolean flag indicating if a parsing exception is expected or not * for the mutation. Some specific objects do not cause any exception as they can hold arbitrary content; they are passed using the * arbitraryMarkers parameter. @@ -768,7 +768,7 @@ protected static String randomMinimumShouldMatch() { /** * Call this method to check a valid json string representing the query under test against * its generated json. - * + *
<p>
* Note: At the time of this writing (Nov 2015) all queries are taken from the query dsl * reference docs mirroring examples there. Here's how the queries were generated: * diff --git a/test/framework/src/main/java/org/opensearch/test/AbstractWireTestCase.java b/test/framework/src/main/java/org/opensearch/test/AbstractWireTestCase.java index 9a4363dc4d946..64c5d916d55d2 100644 --- a/test/framework/src/main/java/org/opensearch/test/AbstractWireTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/AbstractWireTestCase.java @@ -124,9 +124,9 @@ protected final T copyInstance(T instance) throws IOException { /** * Get the {@link NamedWriteableRegistry} to use when de-serializing the object. - * + *
<p>
            * Override this method if you need to register {@link NamedWriteable}s for the test object to de-serialize. - * + *
<p>
            * By default this will return a {@link NamedWriteableRegistry} with no registered {@link NamedWriteable}s */ protected NamedWriteableRegistry getNamedWriteableRegistry() { diff --git a/test/framework/src/main/java/org/opensearch/test/CorruptionUtils.java b/test/framework/src/main/java/org/opensearch/test/CorruptionUtils.java index 0dce5e78bf91f..67522bb618cf1 100644 --- a/test/framework/src/main/java/org/opensearch/test/CorruptionUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/CorruptionUtils.java @@ -121,7 +121,7 @@ public static void corruptFile(Random random, Path... files) throws IOException } } - static void corruptAt(Path path, FileChannel channel, int position) throws IOException { + public static void corruptAt(Path path, FileChannel channel, int position) throws IOException { // read channel.position(position); long filePointer = channel.position(); diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 6436e0fdcb72a..cd26c132b689b 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -55,6 +55,7 @@ import org.opensearch.cluster.coordination.ClusterBootstrapService; import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; @@ -1318,6 +1319,12 @@ public synchronized void validateClusterFormed() { assertTrue("Expected node to exist: " + expectedNode + debugString, discoveryNodes.nodeExists(expectedNode)); } }); + states.forEach(cs -> { + if (cs.nodes().getNodes().values().stream().findFirst().get().isRemoteStoreNode()) { + RepositoriesMetadata repositoriesMetadata = cs.metadata().custom(RepositoriesMetadata.TYPE); + assertTrue(repositoriesMetadata != null && !repositoriesMetadata.repositories().isEmpty()); + } + }); }, 30, TimeUnit.SECONDS); } catch (AssertionError ae) { throw new IllegalStateException("cluster failed to form", ae); @@ -1809,7 +1816,7 @@ public synchronized void stopCurrentClusterManagerNode() throws IOException { /** * Stops any of the current nodes but not the cluster-manager node. */ - public synchronized void stopRandomNonClusterManagerNode() throws IOException { + public synchronized void stopRandomNodeNotCurrentClusterManager() throws IOException { NodeAndClient nodeAndClient = getRandomNodeAndClient(new NodeNamePredicate(getClusterManagerName()).negate()); if (nodeAndClient != null) { logger.info( @@ -1834,11 +1841,46 @@ public synchronized void stopCurrentMasterNode() throws IOException { /** * Stops any of the current nodes but not the cluster-manager node. 
* - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #stopRandomNonClusterManagerNode()} + * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #stopRandomNodeNotCurrentClusterManager()} */ @Deprecated - public synchronized void stopRandomNonMasterNode() throws IOException { - stopRandomNonClusterManagerNode(); + public synchronized void stopRandomNodeNotCurrentMaster() throws IOException { + stopRandomNodeNotCurrentClusterManager(); + } + + /** + * Stops all running nodes in the cluster + */ + public void stopAllNodes() { + try { + if (numDataAndClusterManagerNodes() != numClusterManagerNodes()) { + int totalDataNodes = numDataNodes(); + while (totalDataNodes > 0) { + stopRandomDataNode(); + totalDataNodes -= 1; + } + } + int totalClusterManagerNodes = numClusterManagerNodes(); + while (totalClusterManagerNodes > 1) { + stopRandomNodeNotCurrentClusterManager(); + totalClusterManagerNodes -= 1; + } + stopCurrentClusterManagerNode(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + /** + * Replaces all nodes by stopping all current nodes and starting new nodes. + * Used for remote store test cases, where remote state is restored. + */ + public void resetCluster() { + int totalClusterManagerNodes = numClusterManagerNodes(); + int totalDataNodes = numDataNodes(); + stopAllNodes(); + startClusterManagerOnlyNodes(totalClusterManagerNodes); + startDataOnlyNodes(totalDataNodes); } private synchronized void startAndPublishNodesAndClients(List<NodeAndClient> nodeAndClients) { @@ -2690,6 +2732,9 @@ public void ensureEstimatedStats() { false, false, false, + false, + false, + false, false ); assertThat( diff --git a/test/framework/src/main/java/org/opensearch/test/MockKeywordPlugin.java b/test/framework/src/main/java/org/opensearch/test/MockKeywordPlugin.java index c27f3f169fbae..3d829d77dd323 100644 --- a/test/framework/src/main/java/org/opensearch/test/MockKeywordPlugin.java +++ b/test/framework/src/main/java/org/opensearch/test/MockKeywordPlugin.java @@ -44,7 +44,7 @@ /** * Some tests rely on the keyword tokenizer, but this tokenizer isn't part of lucene-core and therefore not available * in some modules. What this test plugin does is use the mock tokenizer and advertise that as the keyword tokenizer. - * + *
<p>
            * Most tests that need this test plugin use normalizers. When normalizers are constructed they try to resolve the * keyword tokenizer, but if the keyword tokenizer isn't available then constructing normalizers will fail. */ diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 665dcb013490a..533c5bfa2e7cb 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -135,9 +135,9 @@ import org.opensearch.http.HttpInfo; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; -import org.opensearch.index.MergePolicyConfig; import org.opensearch.index.MergeSchedulerConfig; import org.opensearch.index.MockEngineFactoryPlugin; +import org.opensearch.index.TieredMergePolicyProvider; import org.opensearch.index.codec.CodecService; import org.opensearch.index.engine.Segment; import org.opensearch.index.mapper.CompletionFieldMapper; @@ -156,7 +156,6 @@ import org.opensearch.script.MockScriptService; import org.opensearch.script.ScriptMetadata; import org.opensearch.search.MockSearchService; -import org.opensearch.search.SearchBootstrapSettings; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchService; import org.opensearch.telemetry.TelemetrySettings; @@ -165,6 +164,7 @@ import org.opensearch.test.disruption.ServiceDisruptionScheme; import org.opensearch.test.store.MockFSIndexStore; import org.opensearch.test.telemetry.MockTelemetryPlugin; +import org.opensearch.test.telemetry.tracing.StrictCheckSpanProcessor; import org.opensearch.test.transport.MockTransportService; import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; @@ -384,9 +384,7 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase { CodecService.DEFAULT_CODEC, CodecService.LZ4, CodecService.BEST_COMPRESSION_CODEC, - CodecService.ZLIB, - CodecService.ZSTD_CODEC, - CodecService.ZSTD_NO_DICT_CODEC + CodecService.ZLIB ); @BeforeClass @@ -509,7 +507,7 @@ protected Settings.Builder setRandomIndexSettings(Random random, Settings.Builde private static Settings.Builder setRandomIndexMergeSettings(Random random, Settings.Builder builder) { if (random.nextBoolean()) { builder.put( - MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), + TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING.getKey(), (random.nextBoolean() ? random.nextDouble() : random.nextBoolean()).toString() ); } @@ -798,6 +796,30 @@ protected Settings featureFlagSettings() { return featureSettings.build(); } + /** + * Represent if it needs to trigger remote state restore or not. + * For tests with remote store enabled domain, it will be overridden to true. + * + * @return if needs to perform remote state restore or not + */ + protected boolean triggerRemoteStateRestore() { + return false; + } + + /** + * For tests with remote cluster state, it will reset the cluster and cluster state will be + * restored from remote. + */ + protected void performRemoteStoreTestAction() { + if (triggerRemoteStateRestore()) { + String clusterUUIDBefore = clusterService().state().metadata().clusterUUID(); + internalCluster().resetCluster(); + String clusterUUIDAfter = clusterService().state().metadata().clusterUUID(); + // assertion that UUID is changed post restore. 
+ assertFalse(clusterUUIDBefore.equals(clusterUUIDAfter)); + } + } + /** * Creates one or more indices and asserts that the indices are acknowledged. If one of the indices * already exists this method will fail and wipe all the indices created so far. @@ -1383,7 +1405,7 @@ protected void ensureStableCluster(int nodeCount, TimeValue timeValue, boolean l /** * Ensures that all nodes in the cluster are connected to each other. - * + *
<p>
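A sketch of the intended opt-in from a test class; everything here other than triggerRemoteStateRestore() and performRemoteStoreTestAction(), which the hunks above add, is hypothetical:

// Hypothetical remote-store suite: opting in makes performRemoteStoreTestAction()
// stop every node, restart the same cluster shape, and verify the restore produced
// a fresh cluster UUID.
public class RemoteStateRestoreIT extends OpenSearchIntegTestCase {

    @Override
    protected boolean triggerRemoteStateRestore() {
        return true;
    }

    public void testMetadataSurvivesFullRestart() {
        createIndex("test-idx");
        ensureGreen("test-idx");
        performRemoteStoreTestAction();
        assertTrue(client().admin().indices().prepareExists("test-idx").get().isExists());
    }
}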
            * Some network disruptions may leave nodes that are not the cluster-manager disconnected from each other. * {@link org.opensearch.cluster.NodeConnectionsService} will eventually reconnect but it's * handy to be able to ensure this happens faster @@ -1496,6 +1518,18 @@ protected ForceMergeResponse forceMerge() { return actionGet; } + protected ForceMergeResponse forceMerge(int maxNumSegments) { + waitForRelocation(); + ForceMergeResponse actionGet = client().admin() + .indices() + .prepareForceMerge() + .setMaxNumSegments(maxNumSegments) + .execute() + .actionGet(); + assertNoFailures(actionGet); + return actionGet; + } + /** * Returns true iff the given index exists otherwise false */ @@ -1670,6 +1704,11 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma } } assertThat(actualErrors, emptyIterable()); + + if (dummyDocuments) { + bogusIds.addAll(indexRandomForMultipleSlices(indicesArray)); + } + if (!bogusIds.isEmpty()) { // delete the bogus types again - it might trigger merges or at least holes in the segments and enforces deleted docs! for (List doc : bogusIds) { @@ -1687,6 +1726,52 @@ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean ma } } + /* + * This method ingests bogus documents for the given indices such that multiple slices + * are formed. This is useful for testing with the concurrent search use-case as it creates + * multiple slices based on segment count. + * @param indices the indices in which bogus documents should be ingested + * */ + protected Set> indexRandomForMultipleSlices(String... indices) throws InterruptedException { + Set> bogusIds = new HashSet<>(); + int refreshCount = randomIntBetween(2, 3); + for (String index : indices) { + int numDocs = getNumShards(index).totalNumShards * randomIntBetween(2, 10); + while (refreshCount-- > 0) { + final CopyOnWriteArrayList> errors = new CopyOnWriteArrayList<>(); + List inFlightAsyncOperations = new ArrayList<>(); + for (int i = 0; i < numDocs; i++) { + String id = "bogus_doc_" + randomRealisticUnicodeOfLength(between(1, 10)) + dummmyDocIdGenerator.incrementAndGet(); + IndexRequestBuilder indexRequestBuilder = client().prepareIndex() + .setIndex(index) + .setId(id) + .setSource("{}", MediaTypeRegistry.JSON) + .setRouting(id); + indexRequestBuilder.execute( + new PayloadLatchedActionListener<>(indexRequestBuilder, newLatch(inFlightAsyncOperations), errors) + ); + bogusIds.add(Arrays.asList(index, id)); + } + for (CountDownLatch operation : inFlightAsyncOperations) { + operation.await(); + } + final List actualErrors = new ArrayList<>(); + for (Tuple tuple : errors) { + Throwable t = ExceptionsHelper.unwrapCause(tuple.v2()); + if (t instanceof OpenSearchRejectedExecutionException) { + logger.debug("Error indexing doc: " + t.getMessage() + ", reindexing."); + tuple.v1().execute().actionGet(); // re-index if rejected + } else { + actualErrors.add(tuple.v2()); + } + } + assertThat(actualErrors, emptyIterable()); + refresh(index); + } + } + return bogusIds; + } + private final AtomicInteger dummmyDocIdGenerator = new AtomicInteger(); /** Disables an index block for the specified index */ @@ -1960,15 +2045,16 @@ protected Settings nodeSettings(int nodeOrdinal) { // fixed thread pool builder.put("thread_pool.search.min_queue_size", 100); } - if (FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING.get(featureFlagSettings)) { - // By default, for tests we will put the target slice count of 2. 
This will increase the probability of having multiple slices - // when tests are run with concurrent segment search enabled - builder.put(SearchBootstrapSettings.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, 2); - } // Enable tracer only when Telemetry Setting is enabled if (featureFlagSettings().getAsBoolean(FeatureFlags.TELEMETRY_SETTING.getKey(), false)) { + builder.put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), true); builder.put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true); } + if (FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING.get(featureFlagSettings)) { + // By default, for tests we will put the target slice count of 2. This will increase the probability of having multiple slices + // when tests are run with concurrent segment search enabled + builder.put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, 2); + } return builder.build(); } @@ -2124,7 +2210,11 @@ protected boolean addMockGeoShapeFieldMapper() { return true; } - /** Returns {@code true} if this test cluster should have tracing enabled with MockTelemetryPlugin */ + /** + * Returns {@code true} if this test cluster should have tracing enabled with MockTelemetryPlugin + * Disabling this for now as the existing way of strict check do not support multiple nodes internal cluster. + * @return boolean. + */ protected boolean addMockTelemetryPlugin() { return true; } @@ -2331,6 +2421,7 @@ public static void afterClass() throws Exception { INSTANCE.afterInternal(true); checkStaticState(true); } + StrictCheckSpanProcessor.validateTracingStateOnShutdown(); } finally { SUITE_SEED = null; currentCluster = null; @@ -2340,16 +2431,20 @@ public static void afterClass() throws Exception { private static void initializeSuiteScope() throws Exception { Class targetClass = getTestClass(); - /** - * Note we create these test class instance via reflection - * since JUnit creates a new instance per test and that is also - * the reason why INSTANCE is static since this entire method - * must be executed in a static context. + /* + Note we create these test class instance via reflection + since JUnit creates a new instance per test and that is also + the reason why INSTANCE is static since this entire method + must be executed in a static context. 
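In practice, the constructor probing that follows means a suite-scoped parameterized test only has to expose the single-Settings constructor. A minimal sketch (class name hypothetical, the @ParametersFactory method omitted for brevity):

// Hypothetical: because a (Settings) constructor exists, the static suite initializer
// instantiates this class via getConstructor(Settings.class).newInstance(Settings.EMPTY)
// instead of the no-arg constructor.
@OpenSearchIntegTestCase.SuiteScopeTestCase
public class ParameterizedSuiteIT extends ParameterizedOpenSearchIntegTestCase {

    public ParameterizedSuiteIT(Settings dynamicSettings) {
        super(dynamicSettings);
    }
}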
*/ assert INSTANCE == null; if (isSuiteScopedTest(targetClass)) { - // note we need to do this this way to make sure this is reproducible - INSTANCE = (OpenSearchIntegTestCase) targetClass.getConstructor().newInstance(); + // note we need to do this way to make sure this is reproducible + if (isSuiteScopedTestParameterized(targetClass)) { + INSTANCE = (OpenSearchIntegTestCase) targetClass.getConstructor(Settings.class).newInstance(Settings.EMPTY); + } else { + INSTANCE = (OpenSearchIntegTestCase) targetClass.getConstructor().newInstance(); + } boolean success = false; try { INSTANCE.printTestMessage("setup"); @@ -2444,6 +2539,16 @@ private static boolean isSuiteScopedTest(Class clazz) { return clazz.getAnnotation(SuiteScopeTestCase.class) != null; } + /* + * For tests defined with, SuiteScopeTestCase return true if the + * class has a constructor that takes a single Settings parameter + * */ + private static boolean isSuiteScopedTestParameterized(Class clazz) { + return Arrays.stream(clazz.getConstructors()) + .filter(x -> x.getParameterTypes().length == 1) + .anyMatch(x -> x.getParameterTypes()[0].equals(Settings.class)); + } + /** * If a test is annotated with {@link SuiteScopeTestCase} * the checks and modifications that are applied to the used test cluster are only done after all tests diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java index 16f01bc02c1e1..13b824e3cd070 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java @@ -69,10 +69,11 @@ import org.opensearch.node.NodeValidationException; import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptService; -import org.opensearch.search.SearchBootstrapSettings; +import org.opensearch.search.SearchService; import org.opensearch.search.internal.SearchContext; import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.test.telemetry.MockTelemetryPlugin; +import org.opensearch.test.telemetry.tracing.StrictCheckSpanProcessor; import org.opensearch.transport.TransportSettings; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -190,6 +191,7 @@ public static void setUpClass() throws Exception { @AfterClass public static void tearDownClass() throws Exception { stopNode(); + StrictCheckSpanProcessor.validateTracingStateOnShutdown(); } /** @@ -250,14 +252,14 @@ private Node newNode() { .putList(DISCOVERY_SEED_HOSTS_SETTING.getKey()) // empty list disables a port scan for other nodes .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), nodeName) .put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true) + .put(TelemetrySettings.TRACER_FEATURE_ENABLED_SETTING.getKey(), true) .put(nodeSettings()) // allow test cases to provide their own settings or override these .put(featureFlagSettings()); - if (Boolean.parseBoolean(settingsBuilder.get(FeatureFlags.CONCURRENT_SEGMENT_SEARCH)) - && (settingsBuilder.get(SearchBootstrapSettings.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY) == null)) { - // By default, for tests we will put the target slice count of 2 if not explicitly set. 
This will increase the probability of - // having multiple slices when tests are run with concurrent segment search enabled - settingsBuilder.put(SearchBootstrapSettings.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, 2); + if (Boolean.parseBoolean(settingsBuilder.get(FeatureFlags.CONCURRENT_SEGMENT_SEARCH))) { + // By default, for tests we will put the target slice count of 2. This will increase the probability of having multiple slices + // when tests are run with concurrent segment search enabled + settingsBuilder.put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, 2); } Collection> plugins = getPlugins(); @@ -269,6 +271,7 @@ private Node newNode() { plugins.add(MockHttpTransport.TestPlugin.class); } plugins.add(MockScriptService.TestPlugin.class); + plugins.add(MockTelemetryPlugin.class); Node node = new MockNode(settingsBuilder.build(), plugins, forbidPrivateIndexSettings()); try { diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index d9b3bd9109f23..8490ee4fc39bc 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -65,6 +65,7 @@ import org.opensearch.bootstrap.BootstrapForTesting; import org.opensearch.client.Requests; import org.opensearch.cluster.ClusterModule; +import org.opensearch.cluster.coordination.PersistedStateRegistry; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.CheckedRunnable; @@ -1543,6 +1544,13 @@ protected NamedWriteableRegistry writableRegistry() { return new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); } + /** + * The {@link PersistedStateRegistry} to use for this test. Subclasses should override and use liberally. + */ + protected PersistedStateRegistry persistedStateRegistry() { + return new PersistedStateRegistry(); + } + /** * Create a "mock" script for use either with {@link MockScriptEngine} or anywhere where you need a script but don't really care about * its contents. diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTokenStreamTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTokenStreamTestCase.java index e853c1e6314e1..9f7f33b351be3 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTokenStreamTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTokenStreamTestCase.java @@ -48,9 +48,9 @@ @TimeoutSuite(millis = TimeUnits.HOUR) @LuceneTestCase.SuppressReproduceLine @LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") -/** - * Basic test case for token streams. the assertion methods in this class will - * run basic checks to enforce correct behavior of the token streams. +/* + Basic test case for token streams. the assertion methods in this class will + run basic checks to enforce correct behavior of the token streams. 
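Both hunks in this area move the slice-target key from the removed SearchBootstrapSettings onto SearchService. A test that wants a different target can still set it explicitly in its node settings; a sketch with an illustrative value:

// Override in a test's nodeSettings(int) / nodeSettings() to force a specific slice target.
Settings nodeSettings = Settings.builder()
    .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY, 4)
    .build();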
*/ public abstract class OpenSearchTokenStreamTestCase extends BaseTokenStreamTestCase { diff --git a/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java index 636064d8e4f9d..f8813a8c5afa9 100644 --- a/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java @@ -13,6 +13,8 @@ import org.junit.After; import org.junit.Before; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; + /** * Base class for running the tests with parameterization of the dynamic settings * For any class that wants to use parameterization, use @ParametersFactory to generate @@ -44,4 +46,10 @@ public void afterTests() { dynamicSettings.keySet().forEach(settingsToUnset::putNull); client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settingsToUnset).get(); } + + public void indexRandomForConcurrentSearch(String... indices) throws InterruptedException { + if (dynamicSettings.get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey()).equals("true")) { + indexRandomForMultipleSlices(indices); + } + } } diff --git a/test/framework/src/main/java/org/opensearch/test/RemoteStoreTestUtils.java b/test/framework/src/main/java/org/opensearch/test/RemoteStoreTestUtils.java index 0744d5fca853b..2e0b846d801e2 100644 --- a/test/framework/src/main/java/org/opensearch/test/RemoteStoreTestUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/RemoteStoreTestUtils.java @@ -16,12 +16,13 @@ import org.apache.lucene.util.Version; import org.opensearch.common.UUIDs; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; import java.util.HashMap; import java.util.Map; @@ -43,7 +44,7 @@ private RemoteStoreTestUtils() { * @return ByteArrayIndexInput: metadata file bytes with header and footer * @throws IOException IOException */ - public static ByteArrayIndexInput createMetadataFileBytes( + public static InputStream createMetadataFileBytes( Map segmentFilesMap, ReplicationCheckpoint replicationCheckpoint, SegmentInfos segmentInfos @@ -61,7 +62,7 @@ public static ByteArrayIndexInput createMetadataFileBytes( indexOutput.writeBytes(byteArray, byteArray.length); CodecUtil.writeFooter(indexOutput); indexOutput.close(); - return new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); + return new ByteArrayInputStream(BytesReference.toBytes(output.bytes())); } public static Map getDummyMetadata(String prefix, int commitGeneration) { diff --git a/test/framework/src/main/java/org/opensearch/test/TestCluster.java b/test/framework/src/main/java/org/opensearch/test/TestCluster.java index 61742cd4fb827..8c41e6e5d5b38 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/TestCluster.java @@ -42,10 +42,12 @@ import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexTemplateMetadata; +import 
org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.index.IndexNotFoundException; import org.opensearch.indices.IndexTemplateMissingException; import org.opensearch.repositories.RepositoryMissingException; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.Closeable; @@ -253,7 +255,18 @@ public void wipeRepositories(String... repositories) { } for (String repository : repositories) { try { - client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet(); + List<RepositoryMetadata> repositoryMetadata = client().admin() + .cluster() + .prepareGetRepositories(repository) + .execute() + .actionGet() + .repositories(); + if (repositoryMetadata.isEmpty() == false + && BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.get(repositoryMetadata.get(0).settings()) == true) { + client().admin().cluster().prepareCleanupRepository(repository).execute().actionGet(); + } else { + client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet(); + } } catch (RepositoryMissingException ex) { // ignore } @@ -263,7 +276,7 @@ public void wipeRepositories(String... repositories) { /** * Ensures that any breaker statistics are reset to 0. - * + *
<p>
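The intended call pattern for indexRandomForConcurrentSearch, added a few hunks above, appears to be: index the real documents first, then add bogus documents only when the current parameter set enables concurrent segment search, so that multiple slices actually form. A sketch with an illustrative index name:

// Inside a test of a ParameterizedOpenSearchIntegTestCase subclass (declared to throw Exception).
createIndex("test");
client().prepareIndex("test").setId("1").setSource("field", "value").get();
refresh("test");
indexRandomForConcurrentSearch("test"); // no-op for the non-concurrent parameter set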
            * The implementation is specific to the test cluster, because the act of * checking some breaker stats can increase them. */ diff --git a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java index 1a998d7d76a73..bbb3c4a070800 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/opensearch/test/TestSearchContext.java @@ -83,6 +83,8 @@ import java.util.List; import java.util.Map; +import static org.opensearch.test.OpenSearchTestCase.randomIntBetween; + public class TestSearchContext extends SearchContext { public static final SearchShardTarget SHARD_TARGET = new SearchShardTarget( "test", @@ -118,6 +120,7 @@ public class TestSearchContext extends SearchContext { private CollapseContext collapse; protected boolean concurrentSegmentSearchEnabled; private BucketCollectorProcessor bucketCollectorProcessor = NO_OP_BUCKET_COLLECTOR_PROCESSOR; + private int maxSliceCount; /** * Sets the concurrent segment search enabled field @@ -126,6 +129,14 @@ public void setConcurrentSegmentSearchEnabled(boolean concurrentSegmentSearchEna this.concurrentSegmentSearchEnabled = concurrentSegmentSearchEnabled; } + /** + * Sets the maxSliceCount for concurrent search + * @param sliceCount maxSliceCount + */ + public void setMaxSliceCount(int sliceCount) { + this.maxSliceCount = sliceCount; + } + private final Map searchExtBuilders = new HashMap<>(); private ShardSearchRequest request; @@ -163,6 +174,7 @@ public TestSearchContext( this.queryShardContext = queryShardContext; this.searcher = searcher; this.concurrentSegmentSearchEnabled = searcher != null && (searcher.getExecutor() != null); + this.maxSliceCount = randomIntBetween(0, 2); this.scrollContext = scrollContext; } @@ -626,7 +638,7 @@ public Profilers getProfilers() { * Returns concurrent segment search status for the search context */ @Override - public boolean isConcurrentSegmentSearchEnabled() { + public boolean shouldUseConcurrentSearch() { return concurrentSegmentSearchEnabled; } @@ -680,6 +692,21 @@ public BucketCollectorProcessor bucketCollectorProcessor() { return bucketCollectorProcessor; } + @Override + public boolean shouldUseTimeSeriesDescSortOptimization() { + return indexShard != null + && indexShard.isTimeSeriesDescSortOptimizationEnabled() + && sort != null + && sort.isSortOnTimeSeriesField() + && sort.sort.getSort()[0].getReverse() == false; + } + + @Override + public int getTargetMaxSliceCount() { + assert concurrentSegmentSearchEnabled == true : "Please use concurrent search before fetching maxSliceCount"; + return maxSliceCount; + } + /** * Clean the query results by consuming all of it */ diff --git a/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java b/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java index c6fe67ce8fff8..343ad39407785 100644 --- a/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java @@ -164,10 +164,10 @@ private static String differenceBetweenObjectsIgnoringArrayOrder(String path, Ob * This method takes the input xContent data and adds a random field value, inner object or array into each * json object. This can e.g. be used to test if parsers that handle the resulting xContent can handle the * augmented xContent correctly, for example when testing lenient parsing. - * + *
<p>
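A sketch of how a unit test might wire the renamed TestSearchContext hooks together, assuming a QueryShardContext, IndexShard and concurrent ContextIndexSearcher are already in hand and that the three-argument constructor variant is used:

// shouldUseConcurrentSearch() gates getTargetMaxSliceCount(), which asserts
// if queried while concurrent search is off.
TestSearchContext context = new TestSearchContext(queryShardContext, indexShard, searcher);
context.setConcurrentSegmentSearchEnabled(true);
context.setMaxSliceCount(2);
assertTrue(context.shouldUseConcurrentSearch());
assertEquals(2, context.getTargetMaxSliceCount());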
* If the xContent output contains objects that should be exempted from such treatment, an optional filtering * {@link Predicate} can be supplied that checks xContent paths that should be excluded from this treatment. - * + *
<p>
* This predicate should check the xContent path that we want to insert to and return {@code true} if the * path should be excluded. Paths are strings concatenating field names and array indices, so e.g. in: * @@ -188,7 +188,7 @@ private static String differenceBetweenObjectsIgnoringArrayOrder(String path, Ob * * * "foo1.bar.2.baz" would point to the desired insert location. - * + *
<p>
* To exclude inserting into the "foo1" object we would use a {@link Predicate} like *
<pre>
                  * {@code
            @@ -257,12 +257,12 @@ public static BytesReference insertRandomFields(
                  * This utility method takes an XContentParser and walks the xContent structure to find all
                  * possible paths to where a new object or array starts. This can be used in tests that add random
                  * xContent values to test parsing code for errors or to check their robustness against new fields.
            -     *
            +     * 
<p>
            * The path uses dot separated fieldnames and numbers for array indices, similar to what we do in * {@link ObjectPath}. - * + *
<p>
* The {@link Stack} passed in should initially be empty; it gets pushed to by recursive calls - * + *
<p>
            * As an example, the following json xContent: *
<pre>
                  *     {
            diff --git a/test/framework/src/main/java/org/opensearch/test/client/NoOpClient.java b/test/framework/src/main/java/org/opensearch/test/client/NoOpClient.java
            index a0e87d5fd7189..45d779b3e8697 100644
            --- a/test/framework/src/main/java/org/opensearch/test/client/NoOpClient.java
            +++ b/test/framework/src/main/java/org/opensearch/test/client/NoOpClient.java
            @@ -47,7 +47,7 @@
             /**
              * Client that always responds with {@code null} to every request. Override {@link #doExecute(ActionType, ActionRequest, ActionListener)}
              * for testing.
            - *
            + * 
<p>
* See also {@link NoOpNodeClient} if you need to mock a {@link org.opensearch.client.node.NodeClient}. */ public class NoOpClient extends AbstractClient { diff --git a/test/framework/src/main/java/org/opensearch/test/client/NoOpNodeClient.java b/test/framework/src/main/java/org/opensearch/test/client/NoOpNodeClient.java index 7dfe9298e9a92..4e84fe3b91d15 100644 --- a/test/framework/src/main/java/org/opensearch/test/client/NoOpNodeClient.java +++ b/test/framework/src/main/java/org/opensearch/test/client/NoOpNodeClient.java @@ -55,7 +55,7 @@ * Client that always responds with {@code null} to every request. Override {@link #doExecute(ActionType, ActionRequest, ActionListener)}, * {@link #executeLocally(ActionType, ActionRequest, ActionListener)}, or {@link #executeLocally(ActionType, ActionRequest, TaskListener)} * for testing. - * + *
<p>
* See also {@link NoOpClient} if you do not specifically need a {@link NodeClient}. */ public class NoOpNodeClient extends NodeClient { diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/DisruptableMockTransport.java b/test/framework/src/main/java/org/opensearch/test/disruption/DisruptableMockTransport.java index 2029d9893ea35..4f3884f97a570 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/DisruptableMockTransport.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/DisruptableMockTransport.java @@ -42,6 +42,7 @@ import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.CloseableConnection; @@ -91,9 +92,10 @@ public TransportService createTransportService( TransportInterceptor interceptor, Function<BoundTransportAddress, DiscoveryNode> localNodeFactory, @Nullable ClusterSettings clusterSettings, - Set<String> taskHeaders + Set<String> taskHeaders, + Tracer tracer ) { - return new TransportService(settings, this, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders); + return new TransportService(settings, this, threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, tracer); } @Override diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java index 690e15dd80873..44837c37962b4 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/LongGCDisruption.java @@ -248,7 +248,7 @@ public TimeValue expectedTimeToHeal() { /** * resolves all threads belonging to the given node and suspends them if their current stack trace * is "safe". Threads are added to nodeThreads if suspended. - * + *
<p>
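With the Tracer now threaded through, a disruption test that makes no telemetry assertions can satisfy the new parameter with the no-op tracer; a sketch with the surrounding variables assumed:

import java.util.Collections;

import org.opensearch.telemetry.tracing.noop.NoopTracer;
import org.opensearch.transport.TransportService;

// Pass NoopTracer where a test has nothing to assert about tracing.
TransportService transportService = disruptableTransport.createTransportService(
    settings,
    threadPool,
    TransportService.NOOP_TRANSPORT_INTERCEPTOR,
    boundAddress -> localNode,
    null,                   // no ClusterSettings needed for this sketch
    Collections.emptySet(), // no task headers
    NoopTracer.INSTANCE
);
transportService.start();
transportService.acceptIncomingRequests();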
* returns true if some live threads were found. The caller is expected to call this method * until no more "live" threads are found. */ diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/NetworkDisruption.java b/test/framework/src/main/java/org/opensearch/test/disruption/NetworkDisruption.java index e77b8f5b24897..62e19750a363b 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/NetworkDisruption.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/NetworkDisruption.java @@ -109,7 +109,7 @@ public void ensureHealthy(InternalTestCluster cluster) { /** * Ensures that all nodes in the cluster are connected to each other. - * + *
<p>
            * Some network disruptions may leave nodes that are not the cluster-manager disconnected from each other. * {@link org.opensearch.cluster.NodeConnectionsService} will eventually reconnect but it's * handy to be able to ensure this happens faster diff --git a/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java b/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java index 107e42ce43487..7462062a0cd46 100644 --- a/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java +++ b/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java @@ -57,13 +57,13 @@ * A gateway allocator implementation that keeps an in memory list of started shard allocation * that are used as replies to the, normally async, fetch data requests. The in memory list * is adapted when shards are started and failed. - * + *
<p>
            * Nodes leaving and joining the cluster do not change the list of shards the class tracks but * rather serves as a filter to what is returned by fetch data. Concretely - fetch data will * only return shards that were started on nodes that are currently part of the cluster. - * + *
<p>
* For now only primary shard related data is fetched. Replica requests always get an empty response. - * + *
<p>
            * * This class is useful to use in unit tests that require the functionality of {@link GatewayAllocator} but do * not have all the infrastructure required to use it. diff --git a/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestIssueLogging.java b/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestIssueLogging.java index f81c47b6a0a08..97fdcb796f195 100644 --- a/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestIssueLogging.java +++ b/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestIssueLogging.java @@ -43,7 +43,7 @@ /** * Annotation used to set a custom log level when investigating test failures. Do not use this annotation to explicitly * control the logging level in tests; instead, use {@link TestLogging}. - * + *
<p>
            * It supports multiple logger:level comma-separated key-value pairs of logger:level (e.g., * org.opensearch.cluster.metadata:TRACE). Use the _root keyword to set the root logger level. */ diff --git a/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestLogging.java b/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestLogging.java index d440fae409897..6c0b87ac67354 100644 --- a/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestLogging.java +++ b/test/framework/src/main/java/org/opensearch/test/junit/annotations/TestLogging.java @@ -43,7 +43,7 @@ /** * Annotation used to set a custom log level for controlling logging behavior in tests. Do not use this annotation when * investigating test failures; instead, use {@link TestIssueLogging}. - * + *
<p>
            * It supports multiple logger:level comma-separated key-value pairs of logger:level (e.g., * org.opensearch.cluster.metadata:TRACE). Use the _root keyword to set the root logger level. */ diff --git a/test/framework/src/main/java/org/opensearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/opensearch/test/junit/listeners/LoggingListener.java index 7c94c16b77471..ea2c5d055ed8b 100644 --- a/test/framework/src/main/java/org/opensearch/test/junit/listeners/LoggingListener.java +++ b/test/framework/src/main/java/org/opensearch/test/junit/listeners/LoggingListener.java @@ -53,7 +53,7 @@ * A {@link RunListener} that allows changing the log level for a specific test method. When a test method is annotated with the * {@link TestLogging} annotation, the level for the specified loggers will be internally saved before the test method execution and * overridden with the specified ones. At the end of the test method execution the original loggers levels will be restored. - * + *
<p>
            * This class is not thread-safe. Given the static nature of the logging API, it assumes that tests are never run concurrently in the same * JVM. For the very same reason no synchronization has been implemented regarding the save/restore process of the original loggers * levels. diff --git a/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java index dc13924195254..a77865579f3b3 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java @@ -103,7 +103,7 @@ protected void dispatchRequest(RestRequest request) { /** * A mocked {@link NodeClient} which can be easily reconfigured to verify arbitrary verification * functions, and can be reset to allow reconfiguration partway through a test without having to construct a new object. - * + *
<p>
            * By default, will throw {@link AssertionError} when any execution method is called, unless configured otherwise using * {@link #setExecuteVerifier(BiFunction)} or {@link #setExecuteLocallyVerifier(BiFunction)}. */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java index eeaa76b6ca1b3..3a80f25c5d4ab 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/DenylistedPathPatternMatcher.java @@ -35,7 +35,7 @@ /** * Matches denylist patterns. - * + *
<p>
            * Currently the following syntax is supported: * *
<ul>
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java index 10fb1e52259a9..8e0bc03b08442 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/Features.java @@ -41,7 +41,7 @@ * Allows to register additional features supported by the tests runner. * This way any runner can add extra features and use proper skip sections to avoid * breaking other runners till they have implemented the new feature as well. - * + *
<p>
              * Once all runners have implemented the feature, it can be removed from the list * and the related skip sections can be removed from the tests as well. */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java index 03eb7292c5431..9012e4e4a59ff 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java @@ -78,7 +78,7 @@ /** * Runs a suite of yaml tests shared with all the official OpenSearch * clients against an opensearch cluster. - * + *
<p>
              * The suite timeout is extended to account for projects with a large number of tests. */ @TimeoutSuite(millis = 30 * TimeUnits.MINUTE) @@ -110,9 +110,9 @@ public abstract class OpenSearchClientYamlSuiteTestCase extends OpenSearchRestTe /** * This separator pattern matches ',' except it is preceded by a '\'. * This allows us to support ',' within paths when it is escaped with a slash. - * + *
<p>
              * For example, the path string "/a/b/c\,d/e/f,/foo/bar,/baz" is separated to "/a/b/c\,d/e/f", "/foo/bar" and "/baz". - * + *
<p>
              * For reference, this regular expression feature is known as zero-width negative look-behind. * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java index 429ad448f0655..59db1258d1082 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java @@ -82,7 +82,7 @@ /** * Represents a do section: - * + *

              * - do: * catch: missing * headers: diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java index 2132c2ebab51c..8e929eff44348 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/GreaterThanEqualToAssertion.java @@ -47,7 +47,7 @@ /** * Represents a gte assert section: - * + *

              * - gte: { fields._ttl: 0 } */ public class GreaterThanEqualToAssertion extends Assertion { diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsFalseAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsFalseAssertion.java index 6cfbbfc5df8d5..999486ad04455 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsFalseAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsFalseAssertion.java @@ -45,7 +45,7 @@ /** * Represents an is_false assert section: - * + *

              * - is_false: get.fields.bar * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsTrueAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsTrueAssertion.java index e746542d89126..bf5822406f014 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsTrueAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/IsTrueAssertion.java @@ -46,7 +46,7 @@ /** * Represents an is_true assert section: - * + *

              * - is_true: get.fields.bar * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java index 263e0c8fb9c42..d6e2ae1e23996 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanAssertion.java @@ -46,7 +46,7 @@ /** * Represents a lt assert section: - * + *

              * - lt: { fields._ttl: 20000} * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java index f0e7d0c01b8f0..ee46c04496f32 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/LessThanOrEqualToAssertion.java @@ -47,7 +47,7 @@ /** * Represents a lte assert section: - * + *

              * - lte: { fields._ttl: 0 } */ public class LessThanOrEqualToAssertion extends Assertion { diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/MatchAssertion.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/MatchAssertion.java index a97e3e4cb77ed..77d8f3154729e 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/MatchAssertion.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/MatchAssertion.java @@ -50,7 +50,7 @@ /** * Represents a match assert section: - * + *

              * - match: { get.fields._routing: "5" } * */ diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/SetSection.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/SetSection.java index a561a53119a96..c8004d9807cb9 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/SetSection.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/SetSection.java @@ -42,7 +42,7 @@ /** * Represents a set section: - * + *

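Read together, the javadoc hunks above touch every section type of the YAML REST test DSL: the do section plus the set and assert sections. Purely as an illustration of how those sections compose in one test body (the get request under the do section is hypothetical filler, not taken from any hunk; the assertion lines are quoted from the javadocs above):

      - do:
          catch: missing
          get:
            index: test_index
            id: 1
      - is_false: get.fields.bar
      - gte: { fields._ttl: 0 }
      - lte: { fields._ttl: 0 }
      - lt: { fields._ttl: 20000 }
      - match: { get.fields._routing: "5" }
      - set: { _scroll_id: scroll_id }

Each line after the do section corresponds to one of the Assertion subclasses documented above.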
diff --git a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java
index de24ea0de77bb..dda413ce2818e 100644
--- a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java
+++ b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java
@@ -10,7 +10,9 @@
 
 import org.opensearch.telemetry.Telemetry;
 import org.opensearch.telemetry.TelemetrySettings;
+import org.opensearch.telemetry.metrics.Counter;
 import org.opensearch.telemetry.metrics.MetricsTelemetry;
+import org.opensearch.telemetry.metrics.noop.NoopCounter;
 import org.opensearch.telemetry.tracing.TracingTelemetry;
 import org.opensearch.test.telemetry.tracing.MockTracingTelemetry;
 
@@ -18,15 +20,12 @@
  * Mock {@link Telemetry} implementation for testing.
  */
 public class MockTelemetry implements Telemetry {
-
-    private final TelemetrySettings settings;
-
     /**
      * Constructor with settings.
      * @param settings telemetry settings.
      */
     public MockTelemetry(TelemetrySettings settings) {
-        this.settings = settings;
+
     }
 
     @Override
@@ -37,6 +36,20 @@ public TracingTelemetry getTracingTelemetry() {
     @Override
     public MetricsTelemetry getMetricsTelemetry() {
         return new MetricsTelemetry() {
+            @Override
+            public Counter createCounter(String name, String description, String unit) {
+                return NoopCounter.INSTANCE;
+            }
+
+            @Override
+            public Counter createUpDownCounter(String name, String description, String unit) {
+                return NoopCounter.INSTANCE;
+            }
+
+            @Override
+            public void close() {
+
+            }
         };
     }
 }
diff --git a/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java b/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java
index 42a7b63a3d762..24aef714cc259 100644
--- a/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java
+++ b/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java
@@ -45,6 +45,7 @@
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.transport.BoundTransportAddress;
 import org.opensearch.core.transport.TransportResponse;
+import org.opensearch.telemetry.tracing.Tracer;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.CloseableConnection;
 import org.opensearch.transport.ClusterConnectionManager;
@@ -80,7 +81,8 @@ public TransportService createTransportService(
         TransportInterceptor interceptor,
         Function<BoundTransportAddress, DiscoveryNode> localNodeFactory,
         @Nullable ClusterSettings clusterSettings,
-        Set<String> taskHeaders
+        Set<String> taskHeaders,
+        Tracer tracer
     ) {
         StubbableConnectionManager connectionManager = new StubbableConnectionManager(new ClusterConnectionManager(settings, this));
         connectionManager.setDefaultNodeConnectedBehavior((cm, node) -> false);
@@ -93,7 +95,8 @@ public TransportService createTransportService(
             localNodeFactory,
             clusterSettings,
             taskHeaders,
-            connectionManager
+            connectionManager,
+            tracer
         );
     }
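The MockTelemetry hunk above drops the stored (and unused) TelemetrySettings field and stubs the metrics surface with NoopCounter. A minimal sketch of how a test might exercise it; passing null for the now-ignored settings and the Counter.add(double) call are assumptions on my part, not shown in the hunk:

    MockTelemetry telemetry = new MockTelemetry(null);          // settings are no longer retained
    MetricsTelemetry metrics = telemetry.getMetricsTelemetry();
    Counter counter = metrics.createCounter("requests.count", "number of requests", "1");
    counter.add(1.0);                                           // NoopCounter.INSTANCE accepts and discards values
    metrics.close();                                            // the empty close() added above

Both counter factory methods return the same singleton, so tests can record metrics freely without any collector behind them.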
diff --git a/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java
index 0c459b2309b07..760bcbde8fb34 100644
--- a/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java
+++ b/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java
@@ -57,6 +57,8 @@
 import org.opensearch.node.Node;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.tasks.TaskManager;
+import org.opensearch.telemetry.tracing.Tracer;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.tasks.MockTaskManager;
 import org.opensearch.threadpool.ThreadPool;
@@ -114,18 +116,19 @@ public List<Setting<?>> getSettings() {
         }
     }
 
-    public static MockTransportService createNewService(Settings settings, Version version, ThreadPool threadPool) {
-        return createNewService(settings, version, threadPool, null);
+    public static MockTransportService createNewService(Settings settings, Version version, ThreadPool threadPool, Tracer tracer) {
+        return createNewService(settings, version, threadPool, null, tracer);
     }
 
     public static MockTransportService createNewService(
         Settings settings,
         Version version,
         ThreadPool threadPool,
-        @Nullable ClusterSettings clusterSettings
+        @Nullable ClusterSettings clusterSettings,
+        Tracer tracer
     ) {
         MockNioTransport mockTransport = newMockTransport(settings, version, threadPool);
-        return createNewService(settings, mockTransport, version, threadPool, clusterSettings, Collections.emptySet());
+        return createNewService(settings, mockTransport, version, threadPool, clusterSettings, Collections.emptySet(), tracer);
     }
 
     public static MockNioTransport newMockTransport(Settings settings, Version version, ThreadPool threadPool) {
@@ -138,7 +141,8 @@ public static MockNioTransport newMockTransport(Settings settings, Version version, ThreadPool threadPool) {
             new NetworkService(Collections.emptyList()),
             new MockPageCacheRecycler(settings),
             namedWriteableRegistry,
-            new NoneCircuitBreakerService()
+            new NoneCircuitBreakerService(),
+            NoopTracer.INSTANCE
         );
     }
 
@@ -148,9 +152,10 @@ public static MockTransportService createNewService(
         Version version,
         ThreadPool threadPool,
         @Nullable ClusterSettings clusterSettings,
-        Set<String> taskHeaders
+        Set<String> taskHeaders,
+        Tracer tracer
     ) {
-        return createNewService(settings, transport, version, threadPool, clusterSettings, taskHeaders, NOOP_TRANSPORT_INTERCEPTOR);
+        return createNewService(settings, transport, version, threadPool, clusterSettings, taskHeaders, NOOP_TRANSPORT_INTERCEPTOR, tracer);
     }
 
     public static MockTransportService createNewService(
@@ -160,7 +165,8 @@ public static MockTransportService createNewService(
         ThreadPool threadPool,
         @Nullable ClusterSettings clusterSettings,
         Set<String> taskHeaders,
-        TransportInterceptor interceptor
+        TransportInterceptor interceptor,
+        Tracer tracer
     ) {
         return new MockTransportService(
             settings,
@@ -176,7 +182,8 @@ public static MockTransportService createNewService(
                 version
             ),
             clusterSettings,
-            taskHeaders
+            taskHeaders,
+            tracer
         );
     }
 
@@ -194,7 +201,8 @@ public MockTransportService(
         Transport transport,
         ThreadPool threadPool,
         TransportInterceptor interceptor,
-        @Nullable ClusterSettings clusterSettings
+        @Nullable ClusterSettings clusterSettings,
+        Tracer tracer
     ) {
         this(
             settings,
@@ -207,7 +215,8 @@ public MockTransportService(
                 settings.get(Node.NODE_NAME_SETTING.getKey(), UUIDs.randomBase64UUID())
             ),
             clusterSettings,
-            Collections.emptySet()
+            Collections.emptySet(),
+            tracer
         );
     }
 
@@ -225,9 +234,10 @@ public MockTransportService(
         TransportInterceptor interceptor,
         Function<BoundTransportAddress, DiscoveryNode> localNodeFactory,
         @Nullable ClusterSettings clusterSettings,
-        Set<String> taskHeaders
+        Set<String> taskHeaders,
+        Tracer tracer
     ) {
-        this(settings, new StubbableTransport(transport), threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders);
+        this(settings, new StubbableTransport(transport), threadPool, interceptor, localNodeFactory, clusterSettings, taskHeaders, tracer);
     }
 
     private MockTransportService(
@@ -237,7 +247,8 @@ private MockTransportService(
         TransportInterceptor interceptor,
         Function<BoundTransportAddress, DiscoveryNode> localNodeFactory,
         @Nullable ClusterSettings clusterSettings,
-        Set<String> taskHeaders
+        Set<String> taskHeaders,
+        Tracer tracer
     ) {
         super(
             settings,
@@ -247,7 +258,8 @@ private MockTransportService(
             localNodeFactory,
             clusterSettings,
             taskHeaders,
-            new StubbableConnectionManager(new ClusterConnectionManager(settings, transport))
+            new StubbableConnectionManager(new ClusterConnectionManager(settings, transport)),
+            tracer
         );
         this.original = transport.getDelegate();
     }
diff --git a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java
index ec34cd0d8062b..7baa90ede4012 100644
--- a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java
@@ -67,6 +67,7 @@
 import org.opensearch.core.transport.TransportResponse;
 import org.opensearch.node.Node;
 import org.opensearch.tasks.Task;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.MockLogAppender;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.VersionUtils;
@@ -228,7 +229,8 @@ private MockTransportService buildService(
             threadPool,
             clusterSettings,
             Collections.emptySet(),
-            interceptor
+            interceptor,
+            NoopTracer.INSTANCE
         );
         service.start();
         if (acceptRequests) {
diff --git a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java
index 5795f860efa7e..cd6bf02efef6f 100644
--- a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java
+++ b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java
@@ -65,6 +65,7 @@
 import org.opensearch.nio.NioSocketChannel;
 import org.opensearch.nio.Page;
 import org.opensearch.nio.ServerChannelContext;
+import org.opensearch.telemetry.tracing.Tracer;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.ConnectionProfile;
 import org.opensearch.transport.InboundPipeline;
@@ -110,9 +111,10 @@ public MockNioTransport(
         NetworkService networkService,
         PageCacheRecycler pageCacheRecycler,
         NamedWriteableRegistry namedWriteableRegistry,
-        CircuitBreakerService circuitBreakerService
+        CircuitBreakerService circuitBreakerService,
+        Tracer tracer
     ) {
-        super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService);
+        super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService, tracer);
         this.transportThreadWatchdog = new TransportThreadWatchdog(threadPool, settings);
     }
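Every createNewService overload above gains a trailing Tracer parameter, so callers that do not care about tracing now pass NoopTracer.INSTANCE explicitly. A sketch of the smallest migration inside an OpenSearchTestCase-derived test; TestThreadPool, getTestName(), and terminate() are test-framework helpers assumed to be available in that context:

    public void testCreateServiceWithNoopTracer() throws Exception {
        ThreadPool threadPool = new TestThreadPool(getTestName());
        try {
            MockTransportService service = MockTransportService.createNewService(
                Settings.EMPTY,
                Version.CURRENT,
                threadPool,
                NoopTracer.INSTANCE   // the new trailing parameter
            );
            service.start();
            service.close();
        } finally {
            terminate(threadPool);
        }
    }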
diff --git a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransportPlugin.java b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransportPlugin.java
index a596bdd5e419f..deb489614be26 100644
--- a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransportPlugin.java
+++ b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransportPlugin.java
@@ -39,6 +39,7 @@
 import org.opensearch.core.indices.breaker.CircuitBreakerService;
 import org.opensearch.plugins.NetworkPlugin;
 import org.opensearch.plugins.Plugin;
+import org.opensearch.telemetry.tracing.Tracer;
 import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.Transport;
 
@@ -57,7 +58,8 @@ public Map<String, Supplier<Transport>> getTransports(
         PageCacheRecycler pageCacheRecycler,
         CircuitBreakerService circuitBreakerService,
         NamedWriteableRegistry namedWriteableRegistry,
-        NetworkService networkService
+        NetworkService networkService,
+        Tracer tracer
     ) {
         return Collections.singletonMap(
             MOCK_NIO_TRANSPORT_NAME,
@@ -68,7 +70,8 @@ public Map<String, Supplier<Transport>> getTransports(
                 networkService,
                 pageCacheRecycler,
                 namedWriteableRegistry,
-                circuitBreakerService
+                circuitBreakerService,
+                tracer
             )
         );
     }
diff --git a/test/framework/src/test/java/org/opensearch/common/hash/HashFunctionTestCaseTests.java b/test/framework/src/test/java/org/opensearch/common/hash/HashFunctionTestCaseTests.java
new file mode 100644
index 0000000000000..d5fdaf10999fc
--- /dev/null
+++ b/test/framework/src/test/java/org/opensearch/common/hash/HashFunctionTestCaseTests.java
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.hash;
+
+import org.apache.lucene.util.StringHelper;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.VarHandle;
+import java.nio.ByteOrder;
+import java.util.Arrays;
+
+public class HashFunctionTestCaseTests extends OpenSearchTestCase {
+    private static final VarHandle INT_HANDLE = MethodHandles.byteArrayViewVarHandle(int[].class, ByteOrder.LITTLE_ENDIAN);
+
+    /**
+     * Asserts the positive case where a hash function passes the avalanche test.
+     */
+    public void testStrongHashFunction() {
+        HashFunctionTestCase murmur3 = new HashFunctionTestCase() {
+            private final byte[] scratch = new byte[4];
+
+            @Override
+            public byte[] hash(byte[] input) {
+                int hash = StringHelper.murmurhash3_x86_32(input, 0, input.length, StringHelper.GOOD_FAST_HASH_SEED);
+                INT_HANDLE.set(scratch, 0, hash);
+                return scratch;
+            }
+
+            @Override
+            public int outputBits() {
+                return 32;
+            }
+        };
+
+        murmur3.testAvalanche();
+    }
+
+    /**
+     * Asserts the negative case where a hash function fails the avalanche test.
+     */
+    public void testWeakHashFunction() {
+        HashFunctionTestCase arraysHashCode = new HashFunctionTestCase() {
+            private final byte[] scratch = new byte[4];
+
+            @Override
+            public byte[] hash(byte[] input) {
+                int hash = Arrays.hashCode(input);
+                INT_HANDLE.set(scratch, 0, hash);
+                return scratch;
+            }
+
+            @Override
+            public int outputBits() {
+                return 32;
+            }
+        };
+
+        AssertionError ex = expectThrows(AssertionError.class, arraysHashCode::testAvalanche);
+        assertTrue(ex.getMessage().contains("bias exceeds threshold"));
+    }
+}
diff --git a/test/framework/src/test/java/org/opensearch/test/disruption/DisruptableMockTransportTests.java b/test/framework/src/test/java/org/opensearch/test/disruption/DisruptableMockTransportTests.java
index 516cfe1636bf7..6b64270ca68e1 100644
--- a/test/framework/src/test/java/org/opensearch/test/disruption/DisruptableMockTransportTests.java
+++ b/test/framework/src/test/java/org/opensearch/test/disruption/DisruptableMockTransportTests.java
@@ -43,6 +43,7 @@
 import org.opensearch.core.transport.TransportResponse;
 import org.opensearch.core.transport.TransportResponse.Empty;
 import org.opensearch.node.Node;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.disruption.DisruptableMockTransport.ConnectionStatus;
 import org.opensearch.threadpool.ThreadPool;
@@ -161,7 +162,8 @@ protected void execute(Runnable runnable) {
             NOOP_TRANSPORT_INTERCEPTOR,
             a -> node1,
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
         service2 = transport2.createTransportService(
             Settings.EMPTY,
@@ -169,7 +171,8 @@ protected void execute(Runnable runnable) {
             NOOP_TRANSPORT_INTERCEPTOR,
             a -> node2,
             null,
-            Collections.emptySet()
+            Collections.emptySet(),
+            NoopTracer.INSTANCE
         );
 
         service1.start();
diff --git a/test/framework/src/test/java/org/opensearch/transport/nio/SimpleMockNioTransportTests.java b/test/framework/src/test/java/org/opensearch/transport/nio/SimpleMockNioTransportTests.java
index fb77161a02aef..ce401ad99fad7 100644
--- a/test/framework/src/test/java/org/opensearch/transport/nio/SimpleMockNioTransportTests.java
+++ b/test/framework/src/test/java/org/opensearch/transport/nio/SimpleMockNioTransportTests.java
@@ -42,6 +42,7 @@
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.core.indices.breaker.NoneCircuitBreakerService;
+import org.opensearch.telemetry.tracing.noop.NoopTracer;
 import org.opensearch.transport.AbstractSimpleTransportTestCase;
 import org.opensearch.transport.ConnectTransportException;
 import org.opensearch.transport.ConnectionProfile;
@@ -71,7 +72,8 @@ protected Transport build(Settings settings, final Version version, ClusterSettings clusterSettings) {
             networkService,
             new MockPageCacheRecycler(settings),
             namedWriteableRegistry,
-            new NoneCircuitBreakerService()
+            new NoneCircuitBreakerService(),
+            NoopTracer.INSTANCE
         ) {
 
             @Override
diff --git a/test/telemetry/build.gradle b/test/telemetry/build.gradle
index fbabe43aa5e5a..ca523a9204f4c 100644
--- a/test/telemetry/build.gradle
+++ b/test/telemetry/build.gradle
@@ -13,6 +13,7 @@ apply plugin: 'opensearch.build'
 apply plugin: 'opensearch.publish'
 
 dependencies {
+  api project(":libs:opensearch-core")
   api project(":libs:opensearch-common")
   api project(":libs:opensearch-telemetry")
 }
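The new HashFunctionTestCaseTests above leans on the avalanche criterion: flipping a single input bit should flip each of the 32 output bits with probability close to 1/2. A self-contained sketch of that idea (an illustration only, not the framework's actual testAvalanche() implementation):

    import java.util.Arrays;
    import java.util.Random;

    public class AvalancheSketch {
        public static void main(String[] args) {
            Random random = new Random(42);
            int trials = 100_000;
            long flips = 0;
            for (int i = 0; i < trials; i++) {
                byte[] input = new byte[16];
                random.nextBytes(input);
                int before = Arrays.hashCode(input);
                input[random.nextInt(16)] ^= (byte) (1 << random.nextInt(8)); // flip one input bit
                flips += Integer.bitCount(before ^ Arrays.hashCode(input));   // count flipped output bits
            }
            // A strong 32-bit hash averages about 16 flipped bits; Arrays.hashCode falls well
            // short, which is exactly why testWeakHashFunction() expects an AssertionError above.
            System.out.printf("average flipped output bits: %.2f / 32%n", (double) flips / trials);
        }
    }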
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java
index c22a395a6e17c..c5d179f6412a8 100644
--- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java
@@ -10,6 +10,8 @@
 
 import org.opensearch.telemetry.tracing.AbstractSpan;
 import org.opensearch.telemetry.tracing.Span;
+import org.opensearch.telemetry.tracing.SpanCreationContext;
+import org.opensearch.telemetry.tracing.SpanKind;
 import org.opensearch.telemetry.tracing.attributes.Attributes;
 
 import java.util.HashMap;
@@ -29,6 +31,7 @@ public class MockSpan extends AbstractSpan {
     private boolean hasEnded;
     private final Long startTime;
     private Long endTime;
+    private final SpanKind spanKind;
 
     private final Object lock = new Object();
 
@@ -36,49 +39,43 @@ public class MockSpan extends AbstractSpan {
     /**
      * Base Constructor.
-     * @param spanName span name
-     * @param parentSpan parent span
-     * @param spanProcessor span processor
-     * @param attributes attributes
+     *
+     * @param spanCreationContext Span Creation context.
+     * @param parentSpan Parent Span
+     * @param spanProcessor Span Processor
     */
-    public MockSpan(String spanName, Span parentSpan, SpanProcessor spanProcessor, Attributes attributes) {
+    public MockSpan(SpanCreationContext spanCreationContext, Span parentSpan, SpanProcessor spanProcessor) {
         this(
-            spanName,
+            spanCreationContext.getSpanName(),
             parentSpan,
             parentSpan != null ? parentSpan.getTraceId() : IdGenerator.generateTraceId(),
             IdGenerator.generateSpanId(),
             spanProcessor,
-            attributes
-        );
-    }
-
-    /**
-     * Constructor.
-     * @param spanName span name.
-     * @param parentSpan parent span name
-     * @param spanProcessor span processor.
-     */
-    public MockSpan(String spanName, Span parentSpan, SpanProcessor spanProcessor) {
-        this(
-            spanName,
-            parentSpan,
-            parentSpan != null ? parentSpan.getTraceId() : IdGenerator.generateTraceId(),
-            IdGenerator.generateSpanId(),
-            spanProcessor,
-            Attributes.EMPTY
+            spanCreationContext.getAttributes(),
+            SpanKind.INTERNAL
         );
     }
 
     /**
      * Constructor with traceId and SpanIds
-     * @param spanName Span Name
-     * @param parentSpan Parent Span
-     * @param traceId Trace ID
-     * @param spanId Span ID
-     * @param spanProcessor Span Processor
-     * @param attributes attributes
+     *
+     * @param spanName Span Name
+     * @param parentSpan Parent Span
+     * @param traceId Trace ID
+     * @param spanId Span ID
+     * @param spanProcessor Span Processor
+     * @param attributes attributes
+     * @param spanKind type of span.
     */
-    public MockSpan(String spanName, Span parentSpan, String traceId, String spanId, SpanProcessor spanProcessor, Attributes attributes) {
+    public MockSpan(
+        String spanName,
+        Span parentSpan,
+        String traceId,
+        String spanId,
+        SpanProcessor spanProcessor,
+        Attributes attributes,
+        SpanKind spanKind
+    ) {
         super(spanName, parentSpan);
         this.spanProcessor = spanProcessor;
         this.metadata = new HashMap<>();
@@ -88,6 +85,7 @@ public MockSpan(String spanName, Span parentSpan, String traceId, String spanId,
         if (attributes != null) {
             this.metadata.putAll(attributes.getAttributesMap());
         }
+        this.spanKind = spanKind;
     }
 
     @Override
@@ -168,7 +166,9 @@ public Long getEndTime() {
     }
 
     public void setError(Exception exception) {
-        putMetadata("ERROR", exception.getMessage());
+        if (exception != null) {
+            putMetadata("ERROR", exception.getMessage());
+        }
     }
 
     private static class IdGenerator {
@@ -194,4 +194,12 @@ private static String generateTraceId() {
     public Object getAttribute(String key) {
         return metadata.get(key);
     }
+
+    /**
+     * Returns the attributes as map.
+     * @return returns the attributes map.
+     */
+    public Map<String, Object> getAttributes() {
+        return metadata;
+    }
 }
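MockSpan's constructor surface changes in two ways above: the convenience constructor now takes a SpanCreationContext, and the explicit traceId/spanId constructor grows a SpanKind parameter. A hedged sketch of direct construction in a test; Attributes.create().addAttribute(...) is assumed to be the builder from the telemetry attributes package, and the action value is made up:

    SpanProcessor processor = new StrictCheckSpanProcessor();
    MockSpan span = new MockSpan(
        "search-span",
        null,                 // no parent span
        "trace-1",
        "span-1",
        processor,
        Attributes.create().addAttribute("action", "indices:data/read/search"),
        SpanKind.INTERNAL     // the new required kind
    );
    span.setError(null);      // tolerated now: the null check above makes this a no-op
    Object action = span.getAttributes().get("action");   // exposed by the new accessor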
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpanData.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpanData.java
index bc71d097ac28b..0658a6421f3f3 100644
--- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpanData.java
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpanData.java
@@ -9,6 +9,7 @@
 package org.opensearch.test.telemetry.tracing;
 
 import java.util.Arrays;
+import java.util.Map;
 
 /**
  * MockSpanData model for storing Telemetry information for testing.
@@ -17,6 +18,7 @@ public class MockSpanData {
 
     /**
      * MockSpanData constructor with spanID, parentSpanID, traceID, startEpochNanos, endEpochNanos, hasEnded params.
+     *
     * @param spanID spanID
     * @param parentSpanID spanID of the parentSpan
     * @param traceID traceID of the request
@@ -24,6 +26,7 @@ public class MockSpanData {
     * @param endEpochNanos endTime of span in epochNanos
     * @param hasEnded value if the span is closed
     * @param spanName Name of the span emitted
+     * @param attributes span attributes
     */
     public MockSpanData(
         String spanID,
@@ -32,7 +35,8 @@ public MockSpanData(
         long startEpochNanos,
         long endEpochNanos,
         boolean hasEnded,
-        String spanName
+        String spanName,
+        Map<String, Object> attributes
     ) {
         this.spanID = spanID;
         this.traceID = traceID;
@@ -41,10 +45,12 @@ public MockSpanData(
         this.endEpochNanos = endEpochNanos;
         this.hasEnded = hasEnded;
         this.spanName = spanName;
+        this.attributes = attributes;
     }
 
     /**
     * MockSpanData constructor with spanID, parentSpanID, traceID, startEpochNanos, hasEnded and spanName params.
+     *
     * @param spanID spanID
     * @param parentSpanID spanID of the parentSpan
     * @param traceID traceID of the request
@@ -52,6 +58,7 @@ public MockSpanData(
     * @param hasEnded value if the span is closed
     * @param spanName Name of the span emitted
     * @param stackTrace StackTrace to debug the problematic span
+     * @param attributes span attributes
     */
     public MockSpanData(
         String spanID,
@@ -60,7 +67,8 @@ public MockSpanData(
         long startEpochNanos,
         boolean hasEnded,
         String spanName,
-        StackTraceElement[] stackTrace
+        StackTraceElement[] stackTrace,
+        Map<String, Object> attributes
     ) {
         this.spanID = spanID;
         this.traceID = traceID;
@@ -69,6 +77,7 @@ public MockSpanData(
         this.hasEnded = hasEnded;
         this.spanName = spanName;
         this.stackTrace = stackTrace;
+        this.attributes = attributes;
     }
 
     private final String spanID;
@@ -79,6 +88,7 @@ public MockSpanData(
     private final long startEpochNanos;
     private long endEpochNanos;
     private boolean hasEnded;
+    private Map<String, Object> attributes;
 
     private StackTraceElement[] stackTrace;
 
@@ -147,6 +157,14 @@ public void setHasEnded(boolean hasEnded) {
         this.hasEnded = hasEnded;
     }
 
+    /**
+     * Returns the attributes
+     * @return returns the attributes map.
+     */
+    public Map<String, Object> getAttributes() {
+        return attributes;
+    }
+
     @Override
     public String toString() {
         return "MockSpanData{"
@@ -168,6 +186,8 @@ public String toString() {
             + endEpochNanos
             + ", hasEnded="
             + hasEnded
+            + ", attributes="
+            + attributes
             + ", stackTrace="
             + Arrays.toString(stackTrace)
             + '}';
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java
index dccf062df5ca5..6d0cd6d0b1290 100644
--- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java
@@ -8,13 +8,18 @@
 
 package org.opensearch.test.telemetry.tracing;
 
+import org.opensearch.core.common.Strings;
 import org.opensearch.telemetry.tracing.Span;
+import org.opensearch.telemetry.tracing.SpanKind;
 import org.opensearch.telemetry.tracing.TracingContextPropagator;
 import org.opensearch.telemetry.tracing.attributes.Attributes;
 
+import java.util.List;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Optional;
 import java.util.function.BiConsumer;
+import java.util.stream.Collectors;
 
 /**
  * Mock {@link TracingContextPropagator} to persist the span for internode communication.
@@ -34,18 +39,31 @@ public MockTracingContextPropagator(SpanProcessor spanProcessor) {
     }
 
     @Override
-    public Span extract(Map<String, String> props) {
+    public Optional<Span> extract(Map<String, String> props) {
         String value = props.get(TRACE_PARENT);
         if (value != null) {
             String[] values = value.split(SEPARATOR);
             String traceId = values[0];
             String spanId = values[1];
-            return new MockSpan(null, null, traceId, spanId, spanProcessor, Attributes.EMPTY);
+            return Optional.of(new MockSpan(null, null, traceId, spanId, spanProcessor, Attributes.EMPTY, SpanKind.INTERNAL));
         } else {
-            return null;
+            return Optional.empty();
         }
     }
 
+    @Override
+    public Optional<Span> extractFromHeaders(Map<String, List<String>> headers) {
+        if (headers != null) {
+            Map<String, String> convertedHeader = headers.entrySet()
+                .stream()
+                .collect(Collectors.toMap(Map.Entry::getKey, e -> Strings.collectionToCommaDelimitedString(e.getValue())));
+            return extract(convertedHeader);
+        } else {
+            return Optional.empty();
+        }
+
+    }
+
     @Override
     public void inject(Span currentSpan, BiConsumer<String, String> setter) {
         if (currentSpan instanceof MockSpan) {
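With the propagator now returning Optional, a missing traceparent entry surfaces as Optional.empty() instead of null. A round-trip sketch, reusing span and processor from the previous sketch; building the header map generically from whatever inject() wrote avoids assuming the exact traceparent key and separator format:

    MockTracingContextPropagator propagator = new MockTracingContextPropagator(processor);
    Map<String, String> carrier = new HashMap<>();
    propagator.inject(span, carrier::put);                    // writes the traceparent entry

    Optional<Span> restored = propagator.extract(carrier);    // present: the carrier holds what inject wrote
    Map<String, List<String>> headers = new HashMap<>();
    carrier.forEach((k, v) -> headers.put(k, List.of(v)));    // HTTP-style multi-value headers
    Optional<Span> fromHeaders = propagator.extractFromHeaders(headers);
    assertTrue(restored.isPresent());
    assertTrue(fromHeaders.isPresent());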
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java
index 9b958bbb40f84..39817a208bd18 100644
--- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java
@@ -9,14 +9,11 @@
 package org.opensearch.test.telemetry.tracing;
 
 import org.opensearch.telemetry.tracing.Span;
+import org.opensearch.telemetry.tracing.SpanCreationContext;
 import org.opensearch.telemetry.tracing.TracingContextPropagator;
 import org.opensearch.telemetry.tracing.TracingTelemetry;
-import org.opensearch.telemetry.tracing.attributes.Attributes;
-import org.opensearch.test.telemetry.tracing.validators.AllSpansAreEndedProperly;
-import org.opensearch.test.telemetry.tracing.validators.AllSpansHaveUniqueId;
 
-import java.util.Arrays;
-import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
  * Mock {@link TracingTelemetry} implementation for testing.
@@ -24,18 +21,19 @@ public class MockTracingTelemetry implements TracingTelemetry {
 
     private final SpanProcessor spanProcessor = new StrictCheckSpanProcessor();
+    private final AtomicBoolean shutdown = new AtomicBoolean(false);
 
     /**
      * Base constructor.
     */
-    public MockTracingTelemetry() {
-
-    }
+    public MockTracingTelemetry() {}
 
     @Override
-    public Span createSpan(String spanName, Span parentSpan, Attributes attributes) {
-        Span span = new MockSpan(spanName, parentSpan, spanProcessor, attributes);
-        spanProcessor.onStart(span);
+    public Span createSpan(SpanCreationContext spanCreationContext, Span parentSpan) {
+        Span span = new MockSpan(spanCreationContext, parentSpan, spanProcessor);
+        if (shutdown.get() == false) {
+            spanProcessor.onStart(span);
+        }
         return span;
     }
 
@@ -46,12 +44,7 @@ public TracingContextPropagator getContextPropagator() {
 
     @Override
     public void close() {
-        List<MockSpanData> spanData = ((StrictCheckSpanProcessor) spanProcessor).getFinishedSpanItems();
-        if (spanData.size() != 0) {
-            TelemetryValidators validators = new TelemetryValidators(
-                Arrays.asList(new AllSpansAreEndedProperly(), new AllSpansHaveUniqueId())
-            );
-            validators.validate(spanData, 1);
-        }
+        shutdown.set(true);
     }
+
 }
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java
index e3fca8813b696..f7ebb3ee18a9b 100644
--- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java
@@ -9,8 +9,11 @@
 package org.opensearch.test.telemetry.tracing;
 
 import org.opensearch.telemetry.tracing.Span;
+import org.opensearch.test.telemetry.tracing.validators.AllSpansAreEndedProperly;
+import org.opensearch.test.telemetry.tracing.validators.AllSpansHaveUniqueId;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -57,8 +60,28 @@ private MockSpanData toMockSpanData(Span span) {
             System.nanoTime(),
             false,
             span.getSpanName(),
-            Thread.currentThread().getStackTrace()
+            Thread.currentThread().getStackTrace(),
+            (span instanceof MockSpan) ? ((MockSpan) span).getAttributes() : Map.of()
         );
         return spanData;
     }
+
+    /**
+     * Ensures the strict check succeeds for all the spans.
+     */
+    public static void validateTracingStateOnShutdown() {
+        List<MockSpanData> spanData = new ArrayList<>(spanMap.values());
+        if (spanData.size() != 0) {
+            TelemetryValidators validators = new TelemetryValidators(
+                Arrays.asList(new AllSpansAreEndedProperly(), new AllSpansHaveUniqueId())
+            );
+            try {
+                validators.validate(spanData, 1);
+            } catch (Error e) {
+                spanMap.clear();
+                throw e;
+            }
+        }
+
+    }
 }
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/NumberOfTraceIDsEqualToRequests.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/NumberOfTraceIDsEqualToRequests.java
index 3e18e4b873557..5fe268a8f0581 100644
--- a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/NumberOfTraceIDsEqualToRequests.java
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/validators/NumberOfTraceIDsEqualToRequests.java
@@ -8,11 +8,13 @@
 
 package org.opensearch.test.telemetry.tracing.validators;
 
+import org.opensearch.telemetry.tracing.attributes.Attributes;
 import org.opensearch.test.telemetry.tracing.MockSpanData;
 import org.opensearch.test.telemetry.tracing.TracingValidator;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 import java.util.Set;
 import java.util.stream.Collectors;
 
@@ -21,10 +23,16 @@
 */
 public class NumberOfTraceIDsEqualToRequests implements TracingValidator {
 
+    private static final String FILTERING_ATTRIBUTE = "action";
+    private final Attributes attributes;
+
     /**
-     * Base Constructor
+     * Constructor.
+     * @param attributes attributes.
     */
-    public NumberOfTraceIDsEqualToRequests() {}
+    public NumberOfTraceIDsEqualToRequests(Attributes attributes) {
+        this.attributes = attributes;
+    }
 
     /**
     * validates if all spans emitted for a particular request have same traceID.
@@ -33,11 +41,25 @@ public NumberOfTraceIDsEqualToRequests() {}
     */
     @Override
     public List<MockSpanData> validate(List<MockSpanData> spans, int requests) {
-        Set<String> totalTraceIDs = spans.stream().map(MockSpanData::getTraceID).collect(Collectors.toSet());
+        Set<String> totalTraceIDs = spans.stream()
+            .filter(span -> isMatchingSpan(span))
+            .map(MockSpanData::getTraceID)
+            .collect(Collectors.toSet());
         List<MockSpanData> problematicSpans = new ArrayList<>();
         if (totalTraceIDs.size() != requests) {
             problematicSpans.addAll(spans);
         }
         return problematicSpans;
     }
+
+    private boolean isMatchingSpan(MockSpanData mockSpanData) {
+        if (attributes.getAttributesMap().isEmpty()) {
+            return true;
+        } else {
+            return Objects.equals(
+                mockSpanData.getAttributes().get(FILTERING_ATTRIBUTE),
+                attributes.getAttributesMap().get(FILTERING_ATTRIBUTE)
+            );
+        }
+    }
 }
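The strict-spans check moves out of MockTracingTelemetry.close() into the static StrictCheckSpanProcessor.validateTracingStateOnShutdown(), and NumberOfTraceIDsEqualToRequests can now be scoped to spans carrying a particular action attribute. A sketch of how a test teardown might wire both up; finishedSpans and the action value are assumed, and the Attributes builder call mirrors the one hedged earlier:

    // Fails the test if any span is still open or span ids collide
    // (recorded spans are cleared before the error is rethrown).
    StrictCheckSpanProcessor.validateTracingStateOnShutdown();

    // Count distinct trace ids only over spans whose "action" attribute matches.
    TelemetryValidators validators = new TelemetryValidators(
        Arrays.asList(
            new NumberOfTraceIDsEqualToRequests(Attributes.create().addAttribute("action", "indices:data/read/search"))
        )
    );
    validators.validate(finishedSpans, 1);   // finishedSpans: List<MockSpanData> collected by the test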