diff --git a/.github/actions/docker_setup/action.yml b/.github/actions/docker_setup/action.yml new file mode 100644 index 000000000000..2748ce1aecdb --- /dev/null +++ b/.github/actions/docker_setup/action.yml @@ -0,0 +1,38 @@ +name: Docker setup +description: Setup docker +inputs: + nested_job: + description: the fuse for unintended use inside of the reusable callable jobs + default: true + type: boolean + DOCKER_USERNAME: + description: username for the dockerhub login + required: true + type: string + DOCKER_PASSWORD: + description: password for the dockerhub login + required: true + type: string +runs: + using: "composite" + steps: + - name: Docker IPv6 configuration + shell: bash + run: | + # make sure docker uses proper IPv6 config + sudo touch /etc/docker/daemon.json + sudo chown ubuntu:ubuntu /etc/docker/daemon.json + sudo cat < /etc/docker/daemon.json + { + "ipv6": true, + "fixed-cidr-v6": "2001:3984:3989::/64" + } + EOT + sudo chown root:root /etc/docker/daemon.json + sudo systemctl restart docker + sudo systemctl status docker + - name: Docker login + shell: bash + run: | + docker login -u ${{ inputs.DOCKER_USERNAME }} -p ${{ inputs.DOCKER_PASSWORD }} + docker info diff --git a/.github/retry.sh b/.github/retry.sh new file mode 100755 index 000000000000..566c2cf11315 --- /dev/null +++ b/.github/retry.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Execute command until exitcode is 0 or +# maximum number of retries is reached +# Example: +# ./retry +retries=$1 +delay=$2 +command="${@:3}" +exitcode=0 +try=0 +until [ "$try" -ge $retries ] +do + echo "$command" + eval "$command" + exitcode=$? + if [ $exitcode -eq 0 ]; then + break + fi + try=$((try+1)) + sleep $2 +done +exit $exitcode diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml index 21c5a548354b..55e4a4087263 100644 --- a/.github/workflows/backport_branches.yml +++ b/.github/workflows/backport_branches.yml @@ -173,13 +173,13 @@ jobs: clear-repository: true fetch-depth: 0 # It MUST BE THE SAME for all dependencies and the job itself filter: tree:0 - - name: Check docker clickhouse/clickhouse-server building + - name: Check docker altinityinfra/clickhouse-server building run: | cd "$GITHUB_WORKSPACE/tests/ci" python3 docker_server.py --release-type head --no-push \ - --image-repo clickhouse/clickhouse-server --image-path docker/server + --image-repo altinityinfra/clickhouse-server --image-path docker/server python3 docker_server.py --release-type head --no-push \ - --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper + --image-repo altinityinfra/clickhouse-keeper --image-path docker/keeper - name: Cleanup if: always() run: | diff --git a/.github/workflows/cherry_pick.yml b/.github/workflows/cherry_pick.yml deleted file mode 100644 index 8d1e20559780..000000000000 --- a/.github/workflows/cherry_pick.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: CherryPick - -env: - # Force the stdout and stderr streams to be unbuffered - PYTHONUNBUFFERED: 1 - -concurrency: - group: cherry-pick -on: # yamllint disable-line rule:truthy - schedule: - - cron: '0 * * * *' - workflow_dispatch: - -jobs: - CherryPick: - runs-on: [self-hosted, style-checker-aarch64] - steps: - - name: Set envs - # https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings - run: | - cat >> "$GITHUB_ENV" << 'EOF' - TEMP_PATH=${{runner.temp}}/cherry_pick - ROBOT_CLICKHOUSE_SSH_KEY<> "$GITHUB_PATH" - - name: Download and set up build-wrapper - env: - BUILD_WRAPPER_DOWNLOAD_URL: 
${{ env.SONAR_SERVER_URL }}/static/cpp/build-wrapper-linux-x86.zip - run: | - curl -sSLo "$HOME/.sonar/build-wrapper-linux-x86.zip" "${{ env.BUILD_WRAPPER_DOWNLOAD_URL }}" - unzip -o "$HOME/.sonar/build-wrapper-linux-x86.zip" -d "$HOME/.sonar/" - echo "$HOME/.sonar/build-wrapper-linux-x86" >> "$GITHUB_PATH" - - name: Set Up Build Tools - run: | - sudo apt-get update - sudo apt-get install -yq git cmake ccache ninja-build python3 yasm nasm - sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)" - - name: Run build-wrapper - run: | - mkdir build - cd build - cmake .. - cd .. - build-wrapper-linux-x86-64 --out-dir ${{ env.BUILD_WRAPPER_OUT_DIR }} cmake --build build/ - - name: Run sonar-scanner - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} - run: | - sonar-scanner \ - --define sonar.host.url="${{ env.SONAR_SERVER_URL }}" \ - --define sonar.cfamily.build-wrapper-output="${{ env.BUILD_WRAPPER_OUT_DIR }}" \ - --define sonar.projectKey="ClickHouse_ClickHouse" \ - --define sonar.organization="clickhouse-java" \ - --define sonar.cfamily.cpp23.enabled=true \ - --define sonar.exclusions="**/*.java,**/*.ts,**/*.js,**/*.css,**/*.sql" diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml deleted file mode 100644 index 96a35b20c5b6..000000000000 --- a/.github/workflows/pull_request.yml +++ /dev/null @@ -1,1087 +0,0 @@ -# yamllint disable rule:comments-indentation -name: PullRequestCI - -env: - # Force the stdout and stderr streams to be unbuffered - PYTHONUNBUFFERED: 1 - -on: # yamllint disable-line rule:truthy - pull_request: - types: - - synchronize - - reopened - - opened - branches: - - master - paths-ignore: - - 'CHANGELOG.md' - - 'README.md' - - 'SECURITY.md' - - 'docker/docs/**' - - 'docs/**' - - 'utils/check-style/aspell-ignore/**' - - 'tests/ci/docs_check.py' - - '.github/workflows/docs_check.yml' -########################################################################################## -##################################### SMALL CHECKS ####################################### -########################################################################################## -jobs: - CheckLabels: - runs-on: [self-hosted, style-checker] - # Run the first check always, even if the CI is cancelled - if: ${{ always() }} - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Labels check - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 run_check.py - PythonUnitTests: - runs-on: [self-hosted, style-checker] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Python unit tests - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - echo "Testing the main ci directory" - python3 -m unittest discover -s . 
-p '*_test.py' - for dir in *_lambda/; do - echo "Testing $dir" - python3 -m unittest discover -s "$dir" -p '*_test.py' - done - DockerHubPushAarch64: - needs: CheckLabels - runs-on: [self-hosted, style-checker-aarch64] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Images check - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 docker_images_check.py --suffix aarch64 - - name: Upload images files to artifacts - uses: actions/upload-artifact@v3 - with: - name: changed_images_aarch64 - path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json - DockerHubPushAmd64: - needs: CheckLabels - runs-on: [self-hosted, style-checker] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - - name: Images check - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 docker_images_check.py --suffix amd64 - - name: Upload images files to artifacts - uses: actions/upload-artifact@v3 - with: - name: changed_images_amd64 - path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json - DockerHubPush: - needs: [DockerHubPushAmd64, DockerHubPushAarch64, PythonUnitTests] - runs-on: [self-hosted, style-checker] - steps: - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags - filter: tree:0 - - name: Download changed aarch64 images - uses: actions/download-artifact@v3 - with: - name: changed_images_aarch64 - path: ${{ runner.temp }} - - name: Download changed amd64 images - uses: actions/download-artifact@v3 - with: - name: changed_images_amd64 - path: ${{ runner.temp }} - - name: Images check - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 - - name: Upload images files to artifacts - uses: actions/upload-artifact@v3 - with: - name: changed_images - path: ${{ runner.temp }}/changed_images.json - StyleCheck: - needs: DockerHubPush - # We need additional `&& ! cancelled()` to have the job being able to cancel - if: ${{ success() || failure() || ( always() && ! 
cancelled() ) }} - uses: ./.github/workflows/reusable_test.yml - with: - test_name: Style check - runner_type: style-checker - run_command: | - cd "$REPO_COPY/tests/ci" - python3 style_check.py - secrets: - secret_envs: | - ROBOT_CLICKHOUSE_SSH_KEY<> ${GITHUB_OUTPUT} + env: + input: ${{ inputs.runner_type }} + + Common: + strategy: + fail-fast: false + matrix: + SUITE: [aes_encryption, aggregate_functions, atomic_insert, base_58, clickhouse_keeper, data_types, datetime64_extended_range, disk_level_encryption, dns, example, extended_precision_data_types, kafka, kerberos, key_value, lightweight_delete, part_moves_between_shards, rbac, selects, session_timezone, ssl_server, tiered_storage, window_functions] + needs: [runner_labels_setup] + runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }} + timeout-minutes: ${{ inputs.timeout_minutes }} + steps: + - name: Checkout regression repo + uses: actions/checkout@v3 + with: + repository: Altinity/clickhouse-regression + ref: ${{ inputs.commit }} + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + REPORTS_PATH=${{ runner.temp }}/reports_dir + SUITE=${{ matrix.SUITE }} + EOF + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + name: build_report_package_${{ inputs.arch }} + - name: Setup + run: .github/setup.sh + - name: Get deb url + run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV + - name: Run ${{ env.SUITE }} suite + run: python3 + -u ${{ env.SUITE }}/regression.py + --clickhouse-binary-path ${{ env.clickhouse_binary_path }} + --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_binary_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.id="$GITHUB_RUN_ID" job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)" + ${{ env.args }} + - name: Create and upload logs + if: always() + run: .github/create_and_upload_logs.sh 1 + - uses: actions/upload-artifact@v3 + if: always() + with: + name: ${{ env.SUITE }}-${{ inputs.arch }}-artifacts + path: ${{ env.artifact_paths}} + + Benchmark: + strategy: + fail-fast: false + matrix: + STORAGE: [minio, aws_s3, gcs] + needs: [runner_labels_setup] + runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }} + timeout-minutes: ${{ inputs.timeout_minutes }} + steps: + - name: Checkout regression repo + uses: actions/checkout@v3 + with: + repository: Altinity/clickhouse-regression + ref: ${{ inputs.commit }} + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + REPORTS_PATH=${{ runner.temp }}/reports_dir + SUITE=ontime_benchmark + STORAGE=/${{ matrix.STORAGE }} + EOF + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + name: build_report_package_${{ inputs.arch }} + - name: Setup + run: .github/setup.sh + - name: Get deb url + run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV + - name: Run ${{ env.SUITE }} suite + run: python3 + -u ${{ env.SUITE }}/benchmark.py + --clickhouse-binary-path ${{ env.clickhouse_binary_path }} + --storage ${{ matrix.STORAGE }} + --gcs-uri ${{ secrets.REGRESSION_GCS_URI }} + --gcs-key-id ${{ secrets.REGRESSION_GCS_KEY_ID }} + --gcs-key-secret ${{ secrets.REGRESSION_GCS_KEY_SECRET }} + --aws-s3-bucket ${{ 
secrets.REGRESSION_AWS_S3_BUCKET }} + --aws-s3-region ${{ secrets.REGRESSION_AWS_S3_REGION }} + --aws-s3-key-id ${{ secrets.REGRESSION_AWS_S3_KEY_ID }} + --aws-s3-access-key ${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }} + --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_binary_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.id="$GITHUB_RUN_ID" job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)" + ${{ env.args }} + - name: Create and upload logs + if: always() + run: .github/create_and_upload_logs.sh 1 + - uses: actions/upload-artifact@v3 + if: always() + with: + name: benchmark-${{ matrix.STORAGE }}-${{ inputs.arch }}-artifacts + path: ${{ env.artifact_paths }} + + ClickHouseKeeperSSL: + needs: [runner_labels_setup] + runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }} + timeout-minutes: ${{ inputs.timeout_minutes }} + steps: + - name: Checkout regression repo + uses: actions/checkout@v3 + with: + repository: Altinity/clickhouse-regression + ref: ${{ inputs.commit }} + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + REPORTS_PATH=${{runner.temp}}/reports_dir + SUITE=clickhouse_keeper + STORAGE=/ssl + EOF + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + name: build_report_package_${{ inputs.arch }} + - name: Setup + run: .github/setup.sh + - name: Get deb url + run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV + - name: Run ${{ env.SUITE }} suite + run: python3 + -u ${{ env.SUITE }}/regression.py + --ssl + --clickhouse-binary-path ${{ env.clickhouse_binary_path }} + --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_binary_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.id="$GITHUB_RUN_ID" job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)" + ${{ env.args }} + - name: Create and upload logs + if: always() + run: .github/create_and_upload_logs.sh 1 + - uses: actions/upload-artifact@v3 + if: always() + with: + name: ${{ env.SUITE }}-${{ inputs.arch }}-ssl-artifacts + path: ${{ env.artifact_paths }} + + LDAP: + strategy: + fail-fast: false + matrix: + SUITE: [authentication, external_user_directory, role_mapping] + needs: [runner_labels_setup] + runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }} + timeout-minutes: ${{ inputs.timeout_minutes }} + steps: + - name: Checkout regression repo + uses: actions/checkout@v3 + with: + repository: Altinity/clickhouse-regression + ref: ${{ inputs.commit }} + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + REPORTS_PATH=${{ runner.temp }}/reports_dir + SUITE=ldap/${{ matrix.SUITE }} + EOF + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + name: build_report_package_${{ inputs.arch }} + - name: Setup + run: .github/setup.sh + - name: Get deb url + run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV + - name: Run ${{ env.SUITE }} suite + run: python3 + -u ${{ env.SUITE }}/regression.py + --clickhouse-binary-path ${{ env.clickhouse_binary_path }} + --attr 
project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_binary_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.id="$GITHUB_RUN_ID" job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)" + ${{ env.args }} + - name: Create and upload logs + if: always() + run: .github/create_and_upload_logs.sh 1 + - uses: actions/upload-artifact@v3 + if: always() + with: + name: ldap-${{ matrix.SUITE }}-${{ inputs.arch }}-artifacts + path: ${{ env.artifact_paths }} + + Parquet: + needs: [runner_labels_setup] + runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }} + timeout-minutes: ${{ inputs.timeout_minutes }} + steps: + - name: Checkout regression repo + uses: actions/checkout@v3 + with: + repository: Altinity/clickhouse-regression + ref: ${{ inputs.commit }} + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + REPORTS_PATH=${{ runner.temp }}/reports_dir + SUITE=parquet + EOF + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + name: build_report_package_${{ inputs.arch }} + - name: Setup + run: .github/setup.sh + - name: Get deb url + run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV + - name: Run ${{ env.SUITE }} suite + run: python3 + -u ${{ env.SUITE }}/regression.py + --clickhouse-binary-path ${{ env.clickhouse_binary_path }} + --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_binary_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.id="$GITHUB_RUN_ID" job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)" + ${{ env.args }} + - name: Create and upload logs + if: always() + run: .github/create_and_upload_logs.sh 1 + - uses: actions/upload-artifact@v3 + if: always() + with: + name: ${{ env.SUITE }}-${{ inputs.arch }}-artifacts + path: ${{ env.artifact_paths }} + + ParquetS3: + strategy: + fail-fast: false + matrix: + STORAGE: [minio, aws_s3] + needs: [runner_labels_setup] + runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }} + timeout-minutes: ${{ inputs.timeout_minutes }} + steps: + - name: Checkout regression repo + uses: actions/checkout@v3 + with: + repository: Altinity/clickhouse-regression + ref: ${{ inputs.commit }} + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + REPORTS_PATH=${{ runner.temp }}/reports_dir + SUITE=parquet + STORAGE=${{ matrix.STORAGE}} + EOF + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + name: build_report_package_${{ inputs.arch }} + - name: Setup + run: .github/setup.sh + - name: Get deb url + run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV + - name: Run ${{ env.SUITE }} suite + run: python3 + -u ${{ env.SUITE }}/regression.py + --clickhouse-binary-path ${{ env.clickhouse_binary_path }} + --storage ${{ matrix.STORAGE }} + --aws-s3-bucket ${{ secrets.REGRESSION_AWS_S3_BUCKET }} + --aws-s3-region ${{ secrets.REGRESSION_AWS_S3_REGION }} + --aws-s3-key-id ${{ secrets.REGRESSION_AWS_S3_KEY_ID }} + --aws-s3-access-key ${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }} + --attr project="$GITHUB_REPOSITORY" 
project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_binary_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.id="$GITHUB_RUN_ID" job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)" + ${{ env.args }} + - name: Create and upload logs + if: always() + run: .github/create_and_upload_logs.sh 1 + - uses: actions/upload-artifact@v3 + if: always() + with: + name: ${{ env.SUITE }}-${{ env.STORAGE }}-${{ inputs.arch }}-artifacts + path: ${{ env.artifact_paths }} + + S3: + strategy: + fail-fast: false + matrix: + STORAGE: [minio, aws_s3, gcs] + needs: [runner_labels_setup] + runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }} + timeout-minutes: ${{ inputs.timeout_minutes }} + steps: + - name: Checkout regression repo + uses: actions/checkout@v3 + with: + repository: Altinity/clickhouse-regression + ref: ${{ inputs.commit }} + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + REPORTS_PATH=${{ runner.temp }}/reports_dir + SUITE=s3 + STORAGE=/${{ matrix.STORAGE }} + EOF + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + name: build_report_package_${{ inputs.arch }} + - name: Setup + run: .github/setup.sh + - name: Get deb url + run: python3 .github/get-deb-url.py --reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV + - name: Run ${{ env.SUITE }} suite + run: python3 + -u ${{ env.SUITE }}/regression.py + --clickhouse-binary-path ${{ env.clickhouse_binary_path }} + --storage ${{ matrix.STORAGE }} + --gcs-uri ${{ secrets.REGRESSION_GCS_URI }} + --gcs-key-id ${{ secrets.REGRESSION_GCS_KEY_ID }} + --gcs-key-secret ${{ secrets.REGRESSION_GCS_KEY_SECRET }} + --aws-s3-bucket ${{ secrets.REGRESSION_AWS_S3_BUCKET }} + --aws-s3-region ${{ secrets.REGRESSION_AWS_S3_REGION }} + --aws-s3-key-id ${{ secrets.REGRESSION_AWS_S3_KEY_ID }} + --aws-s3-access-key ${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }} + --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_binary_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.id="$GITHUB_RUN_ID" job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)" + ${{ env.args }} + - name: Create and upload logs + if: always() + run: .github/create_and_upload_logs.sh 1 + - uses: actions/upload-artifact@v3 + if: always() + with: + name: ${{ env.SUITE }}-${{ matrix.STORAGE }}-${{ inputs.arch }}-artifacts + path: ${{ env.artifact_paths}} + + TieredStorage: + strategy: + fail-fast: false + matrix: + STORAGE: [minio, s3amazon, s3gcs] + needs: [runner_labels_setup] + runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }} + timeout-minutes: ${{ inputs.timeout_minutes }} + steps: + - name: Checkout regression repo + uses: actions/checkout@v3 + with: + repository: Altinity/clickhouse-regression + - name: Set envs + run: | + cat >> "$GITHUB_ENV" << 'EOF' + REPORTS_PATH=${{ runner.temp }}/reports_dir + SUITE=tiered_storage + STORAGE=/${{ matrix.STORAGE }} + EOF + - name: Download json reports + uses: actions/download-artifact@v3 + with: + path: ${{ env.REPORTS_PATH }} + name: build_report_package_${{ inputs.arch }} + - name: Setup + run: .github/setup.sh + - name: Get deb url + run: python3 .github/get-deb-url.py 
--reports-path ${{ env.REPORTS_PATH }} --github-env $GITHUB_ENV + - name: Run ${{ env.SUITE }} suite + run: python3 + -u ${{ env.SUITE }}/regression.py + --clickhouse-binary-path ${{ env.clickhouse_binary_path }} + --aws-s3-access-key ${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }} + --aws-s3-key-id ${{ secrets.REGRESSION_AWS_S3_KEY_ID }} + --aws-s3-uri https://s3.${{ secrets.REGRESSION_AWS_S3_REGION}}.amazonaws.com/${{ secrets.REGRESSION_AWS_S3_BUCKET }}/data/ + --gcs-key-id ${{ secrets.REGRESSION_GCS_KEY_ID }} + --gcs-key-secret ${{ secrets.REGRESSION_GCS_KEY_SECRET }} + --gcs-uri ${{ secrets.REGRESSION_GCS_URI }} + --with-${{ matrix.STORAGE }} + --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_binary_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.id="$GITHUB_RUN_ID" job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)" + ${{ env.args }} + - name: Create and upload logs + if: always() + run: .github/create_and_upload_logs.sh 1 + - uses: actions/upload-artifact@v3 + if: always() + with: + name: ${{ env.SUITE }}-${{ matrix.STORAGE }}-${{ inputs.arch }}-artifacts + path: ${{ env.artifact_paths}} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index 051a3ea1eacb..000000000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,66 +0,0 @@ -name: PublishedReleaseCI -# - Gets artifacts from S3 -# - Sends it to JFROG Artifactory -# - Adds them to the release assets - -on: # yamllint disable-line rule:truthy - release: - types: - - published - workflow_dispatch: - inputs: - tag: - description: 'Release tag' - required: true - type: string - -jobs: - ReleasePublish: - runs-on: [self-hosted, style-checker] - steps: - - name: Set tag from input - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV" - - name: Set tag from REF - if: github.event_name == 'release' - run: | - echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV" - - name: Deploy packages and assets - run: | - curl --silent --data '' --no-buffer \ - '${{ secrets.PACKAGES_RELEASE_URL }}/release/'"${GITHUB_TAG}"'?binary=binary_darwin&binary=binary_darwin_aarch64&sync=true' - ############################################################################################ - ##################################### Docker images ####################################### - ############################################################################################ - DockerServerImages: - runs-on: [self-hosted, style-checker] - steps: - - name: Set tag from input - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV" - - name: Set tag from REF - if: github.event_name == 'release' - run: | - echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV" - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - clear-repository: true - fetch-depth: 0 # otherwise we will have no version info - filter: tree:0 - ref: ${{ env.GITHUB_TAG }} - - name: Check docker clickhouse/clickhouse-server building - run: | - cd "$GITHUB_WORKSPACE/tests/ci" - python3 docker_server.py --release-type auto --version "$GITHUB_TAG" \ - --image-repo clickhouse/clickhouse-server --image-path docker/server - python3 docker_server.py --release-type auto 
--version "$GITHUB_TAG" \ - --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper - - name: Cleanup - if: always() - run: | - docker ps --quiet | xargs --no-run-if-empty docker kill ||: - docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: - sudo rm -fr "$TEMP_PATH" diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index b5771fa87ab9..6a81bef63aee 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -4,22 +4,44 @@ name: ReleaseBranchCI env: # Force the stdout and stderr streams to be unbuffered PYTHONUNBUFFERED: 1 + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} on: # yamllint disable-line rule:truthy + pull_request: + types: + - synchronize + - reopened + - opened + branches: + # Anything/23.8 (e.g customizations/23.8.x) + - '**/23.8*' + release: + types: + - published + - prereleased push: branches: - # 22.1 and 22.10 - - '2[1-9].[1-9][0-9]' - - '2[1-9].[1-9]' + - 'releases/23.8**' jobs: DockerHubPushAarch64: - runs-on: [self-hosted, style-checker-aarch64] + runs-on: [self-hosted, style-checker, on-demand, type-cax41, in-fsn1, image-arm-app-docker-ce] steps: - name: Check out repository code - uses: ClickHouse/checkout@v1 + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 with: clear-repository: true + + - name: Common docker setup + uses: ./.github/actions/docker_setup + with: + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + - name: Images check run: | cd "$GITHUB_WORKSPACE/tests/ci" @@ -29,13 +51,21 @@ jobs: with: name: changed_images_aarch64 path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json + DockerHubPushAmd64: - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, style-checker, on-demand, type-cpx51, image-x86-app-docker-ce] steps: - name: Check out repository code - uses: ClickHouse/checkout@v1 + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 with: clear-repository: true + + - name: Common docker setup + uses: ./.github/actions/docker_setup + with: + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + - name: Images check run: | cd "$GITHUB_WORKSPACE/tests/ci" @@ -45,105 +75,84 @@ jobs: with: name: changed_images_amd64 path: ${{ runner.temp }}/docker_images_check/changed_images_amd64.json + DockerHubPush: needs: [DockerHubPushAmd64, DockerHubPushAarch64] - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, style-checker, on-demand, type-cpx41, image-x86-app-docker-ce] steps: - name: Check out repository code - uses: ClickHouse/checkout@v1 + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 with: clear-repository: true fetch-depth: 0 # to find ancestor merge commits necessary for finding proper docker tags filter: tree:0 + + - name: Common docker setup + uses: ./.github/actions/docker_setup + with: + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + - name: Download changed aarch64 images uses: actions/download-artifact@v3 with: name: changed_images_aarch64 path: ${{ runner.temp }} + - name: Download changed amd64 images uses: actions/download-artifact@v3 with: name: changed_images_amd64 path: ${{ 
runner.temp }} + - name: Images check run: | cd "$GITHUB_WORKSPACE/tests/ci" python3 docker_manifests_merge.py --suffix amd64 --suffix aarch64 + - name: Upload images files to artifacts uses: actions/upload-artifact@v3 with: name: changed_images path: ${{ runner.temp }}/changed_images.json - CompatibilityCheckX86: + + CompatibilityCheck: needs: [BuilderDebRelease] uses: ./.github/workflows/reusable_test.yml + secrets: inherit with: test_name: Compatibility check X86 - runner_type: style-checker + runner_type: style-checker, on-demand, type-cpx41, image-x86-app-docker-ce + timeout_minutes: 180 run_command: | cd "$REPO_COPY/tests/ci" python3 compatibility_check.py --check-name "Compatibility check (amd64)" --check-glibc --check-distributions - CompatibilityCheckAarch64: - needs: [BuilderDebAarch64] - uses: ./.github/workflows/reusable_test.yml - with: - test_name: Compatibility check X86 - runner_type: style-checker - run_command: | - cd "$REPO_COPY/tests/ci" - python3 compatibility_check.py --check-name "Compatibility check (aarch64)" --check-glibc + ######################################################################################### #################################### ORDINARY BUILDS #################################### ######################################################################################### BuilderDebRelease: needs: [DockerHubPush] uses: ./.github/workflows/reusable_build.yml + secrets: inherit with: build_name: package_release checkout_depth: 0 + timeout_minutes: 180 + runner_type: builder, on-demand, type-ccx53, image-x86-app-docker-ce + additional_envs: | + CLICKHOUSE_STABLE_VERSION_SUFFIX=altinitystable + BuilderDebAarch64: needs: [DockerHubPush] uses: ./.github/workflows/reusable_build.yml + secrets: inherit with: build_name: package_aarch64 checkout_depth: 0 - BuilderDebAsan: - needs: [DockerHubPush] - uses: ./.github/workflows/reusable_build.yml - with: - build_name: package_asan - BuilderDebUBsan: - needs: [DockerHubPush] - uses: ./.github/workflows/reusable_build.yml - with: - build_name: package_ubsan - BuilderDebTsan: - needs: [DockerHubPush] - uses: ./.github/workflows/reusable_build.yml - with: - build_name: package_tsan - BuilderDebMsan: - needs: [DockerHubPush] - uses: ./.github/workflows/reusable_build.yml - with: - build_name: package_msan - BuilderDebDebug: - needs: [DockerHubPush] - uses: ./.github/workflows/reusable_build.yml - with: - build_name: package_debug - BuilderBinDarwin: - needs: [DockerHubPush] - uses: ./.github/workflows/reusable_build.yml - with: - build_name: binary_darwin - checkout_depth: 0 - BuilderBinDarwinAarch64: - needs: [DockerHubPush] - uses: ./.github/workflows/reusable_build.yml - with: - build_name: binary_darwin_aarch64 - checkout_depth: 0 + runner_type: builder, on-demand, type-ccx53, image-x86-app-docker-ce + + ############################################################################################ ##################################### Docker images ####################################### ############################################################################################ @@ -151,27 +160,30 @@ jobs: needs: - BuilderDebRelease - BuilderDebAarch64 - runs-on: [self-hosted, style-checker] + runs-on: [self-hosted, style-checker, on-demand, type-cpx51, image-x86-app-docker-ce] + timeout-minutes: 180 steps: - name: Check out repository code - uses: ClickHouse/checkout@v1 + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 with: clear-repository: true fetch-depth: 0 # It MUST BE THE SAME for all 
dependencies and the job itself filter: tree:0 - - name: Check docker clickhouse/clickhouse-server building + - name: Check docker altinityinfra/clickhouse-server building run: | cd "$GITHUB_WORKSPACE/tests/ci" - python3 docker_server.py --release-type head --no-push \ - --image-repo clickhouse/clickhouse-server --image-path docker/server - python3 docker_server.py --release-type head --no-push \ - --image-repo clickhouse/clickhouse-keeper --image-path docker/keeper + docker buildx create --use + python3 docker_server.py --release-type head \ + --image-repo altinityinfra/clickhouse-server --image-path docker/server + python3 docker_server.py --release-type head \ + --image-repo altinityinfra/clickhouse-keeper --image-path docker/keeper - name: Cleanup if: always() run: | docker ps --quiet | xargs --no-run-if-empty docker kill ||: docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: sudo rm -fr "$TEMP_PATH" + ############################################################################################ ##################################### BUILD REPORTER ####################################### ############################################################################################ @@ -180,31 +192,12 @@ jobs: needs: - BuilderDebRelease - BuilderDebAarch64 - - BuilderDebAsan - - BuilderDebTsan - - BuilderDebUBsan - - BuilderDebMsan - - BuilderDebDebug uses: ./.github/workflows/reusable_test.yml + secrets: inherit with: test_name: ClickHouse build check - runner_type: style-checker - additional_envs: | - NEEDS_DATA<> "$GITHUB_ENV" << 'EOF' + TEMP_PATH=${{runner.temp}}/signed + REPORTS_PATH=${{runner.temp}}/reports_dir + EOF + - name: Clear repository + run: | + sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE" + - name: Check out repository code + uses: actions/checkout@v2 + - name: Download json reports + uses: actions/download-artifact@v2 + with: + path: ${{ env.REPORTS_PATH }} + - name: Sign release + env: + GPG_BINARY_SIGNING_KEY: ${{ secrets.GPG_BINARY_SIGNING_KEY }} + GPG_BINARY_SIGNING_PASSPHRASE: ${{ secrets.GPG_BINARY_SIGNING_PASSPHRASE }} + REPORTS_PATH: ${{ env.REPORTS_PATH }} + run: | + cd "$GITHUB_WORKSPACE/tests/ci" + python3 sign_release.py + - name: Upload signed hashes + uses: actions/upload-artifact@v2 + with: + name: signed-hashes + path: ${{ env.TEMP_PATH }}/*.gpg + - name: Cleanup + if: always() + run: | + docker ps --quiet | xargs --no-run-if-empty docker kill ||: + docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||: + sudo rm -fr "$TEMP_PATH" + + ########################################################################################### + ################################ FINISH CHECK ############################################# + ########################################################################################### FinishCheck: needs: - DockerHubPush - DockerServerImages - BuilderReport - - BuilderSpecialReport + # - BuilderSpecialReport - MarkReleaseReady - - FunctionalStatelessTestDebug - FunctionalStatelessTestRelease - FunctionalStatelessTestAarch64 - - FunctionalStatelessTestAsan - - FunctionalStatelessTestTsan - - FunctionalStatelessTestMsan - - FunctionalStatelessTestUBsan - - FunctionalStatefulTestDebug - FunctionalStatefulTestRelease - FunctionalStatefulTestAarch64 - - FunctionalStatefulTestAsan - - FunctionalStatefulTestTsan - - FunctionalStatefulTestMsan - - FunctionalStatefulTestUBsan - - StressTestDebug - - StressTestAsan - - StressTestTsan - - StressTestMsan - - StressTestUBsan - - IntegrationTestsAsan - - 
IntegrationTestsTsan - IntegrationTestsRelease - - CompatibilityCheckX86 - - CompatibilityCheckAarch64 - runs-on: [self-hosted, style-checker] + - CompatibilityCheck + - RegressionTestsRelease + - RegressionTestsAarch64 + - SignRelease + runs-on: [self-hosted, style-checker, on-demand, type-cpx31, image-x86-app-docker-ce] steps: - name: Check out repository code - uses: ClickHouse/checkout@v1 + uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 with: clear-repository: true - name: Finish label diff --git a/.github/workflows/reusable_build.yml b/.github/workflows/reusable_build.yml index f36b93bea588..4204798f9861 100644 --- a/.github/workflows/reusable_build.yml +++ b/.github/workflows/reusable_build.yml @@ -1,10 +1,6 @@ ### For the pure soul wishes to move it to another place # https://github.com/orgs/community/discussions/9050 -env: - # Force the stdout and stderr streams to be unbuffered - PYTHONUNBUFFERED: 1 - name: Build ClickHouse 'on': workflow_call: @@ -14,38 +10,104 @@ name: Build ClickHouse required: true type: string checkout_depth: - description: the value of the git shallow checkout + description: the value of the git shallow checkout. required: false type: number default: 1 runner_type: - description: the label of runner to use + description: the label of runner to use, can be a simple string or a comma-separated list. default: builder type: string + timeout_minutes: + description: Maximum number of minutes to let workflow run before GitHub cancels it. + default: 120 + type: number additional_envs: - description: additional ENV variables to setup the job + description: additional ENV variables to setup the job. type: string + secrets: + AWS_SECRET_ACCESS_KEY: + description: the access key to the aws param store. + required: true + AWS_ACCESS_KEY_ID: + description: the access key id to the aws param store. + required: true + AWS_DEFAULT_REGION: + description: the region of the aws param store. + required: true + DOCKER_USERNAME: + description: username of the docker user. + required: true + DOCKER_PASSWORD: + description: password to the docker user. 
+ required: true + +env: + # Force the stdout and stderr streams to be unbuffered + PYTHONUNBUFFERED: 1 + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} jobs: + runner_labels_setup: + name: Compute proper runner labels for the rest of the jobs + runs-on: ubuntu-latest + outputs: + runner_labels: ${{ steps.setVariables.outputs.runner_labels }} + steps: + - id: setVariables + name: Prepare runner_labels variables for the later steps + run: | + + # Prepend self-hosted + input="self-hosted, ${input}" + + # Remove all whitespace + input="$(echo ${input} | tr -d [:space:])" + # Make something like a JSON array from comma-separated list + input="['${input}']" + input="${input//\,/\'\, \'}" + + echo "runner_labels=$input" >> ${GITHUB_OUTPUT} + env: + input: ${{ inputs.runner_type }} + Build: name: Build-${{inputs.build_name}} + needs: runner_labels_setup + runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }} + env: GITHUB_JOB_OVERRIDDEN: Build-${{inputs.build_name}} - runs-on: [self-hosted, '${{inputs.runner_type}}'] + timeout-minutes: ${{inputs.timeout_minutes}} steps: + - name: Debug input runer tag names + run: | + cat <> "$GITHUB_ENV" << 'EOF' ${{inputs.additional_envs}} EOF python3 "$GITHUB_WORKSPACE"/tests/ci/ci_config.py --build-name "${{inputs.build_name}}" >> "$GITHUB_ENV" + - name: Apply sparse checkout for contrib # in order to check that it doesn't break build # This step is done in GITHUB_WORKSPACE, # because it's broken in REPO_COPY for some reason @@ -56,24 +118,35 @@ jobs: "$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK' du -hs "$GITHUB_WORKSPACE/contrib" ||: find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||: + - name: Common setup uses: ./.github/actions/common_setup with: job_type: build_check + + - name: Common docker setup + uses: ./.github/actions/docker_setup + with: + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + - name: Download changed images uses: actions/download-artifact@v3 with: name: changed_images path: ${{ env.IMAGES_PATH }} + - name: Build run: | cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME" + - name: Upload build URLs to artifacts if: ${{ success() || failure() }} uses: actions/upload-artifact@v3 with: name: ${{ env.BUILD_URLS }} path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json + - name: Clean if: always() uses: ./.github/actions/clean diff --git a/.github/workflows/reusable_test.yml b/.github/workflows/reusable_test.yml index e82d2d515963..c20f32bc9f88 100644 --- a/.github/workflows/reusable_test.yml +++ b/.github/workflows/reusable_test.yml @@ -10,9 +10,13 @@ name: Testing workflow required: true type: string runner_type: - description: the label of runner to use + description: the label of runner to use, can be a simple string or a comma-separated list required: true type: string + timeout_minutes: + description: Maximum number of minutes to let workflow run before GitHub cancels it. + default: 120 + type: number run_command: description: the command to launch the check. 
Usually starts with `cd '$REPO_COPY/tests/ci'` required: true @@ -38,18 +42,61 @@ name: Testing workflow secret_envs: description: if given, it's passed to the environments required: false + AWS_SECRET_ACCESS_KEY: + description: the access key to the aws param store. + required: true + AWS_ACCESS_KEY_ID: + description: the access key id to the aws param store. + required: true + AWS_DEFAULT_REGION: + description: the region of the aws param store. + required: true + DOCKER_USERNAME: + description: username of the docker user. + required: true + DOCKER_PASSWORD: + description: password to the docker user. + required: true env: # Force the stdout and stderr streams to be unbuffered PYTHONUNBUFFERED: 1 CHECK_NAME: ${{inputs.test_name}} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} jobs: + runner_labels_setup: + name: Compute proper runner labels for the rest of the jobs + runs-on: ubuntu-latest + outputs: + runner_labels: ${{ steps.setVariables.outputs.runner_labels }} + steps: + - id: setVariables + name: Prepare runner_labels variables for the later steps + run: | + + # Prepend self-hosted + input="self-hosted, ${input}" + + # Remove all whitespace + input="$(echo ${input} | tr -d [:space:])" + # Make something like a JSON array from comma-separated list + input="[ '${input//\,/\'\, \'}' ]" + + echo "runner_labels=$input" >> ${GITHUB_OUTPUT} + env: + input: ${{ inputs.runner_type }} + PrepareStrategy: # batches < 1 is misconfiguration, # and we need this step only for batches > 1 if: ${{ inputs.batches > 1 }} - runs-on: [self-hosted, style-checker-aarch64] + runs-on: ubuntu-latest #TODO(vnemkov): NO need for a beefy custom runner for a simple script + #runs-on: [self-hosted, style-checker-aarch64] outputs: batches: ${{steps.batches.outputs.batches}} steps: @@ -68,8 +115,11 @@ jobs: name: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }} env: GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ inputs.batches > 1 && format('-{0}',matrix.batch) || '' }} - runs-on: [self-hosted, '${{inputs.runner_type}}'] - needs: [PrepareStrategy] + + needs: [PrepareStrategy, runner_labels_setup] + runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }} + + timeout-minutes: ${{inputs.timeout_minutes}} strategy: fail-fast: false # we always wait for entire matrix matrix: @@ -78,27 +128,43 @@ jobs: && fromJson(needs.PrepareStrategy.outputs.batches) || fromJson('[0]')}} steps: + - name: Debug input runer tag names + run: | + cat <> "$GITHUB_ENV" << 'EOF' ${{inputs.additional_envs}} ${{secrets.secret_envs}} EOF + - name: Common setup uses: ./.github/actions/common_setup with: job_type: test + - name: Download json reports uses: actions/download-artifact@v3 with: path: ${{ env.REPORTS_PATH }} + + - name: Docker setup + uses: ./.github/actions/docker_setup + with: + DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + - name: Setup batch if: ${{ inputs.batches > 1}} run: | @@ -106,8 +172,10 @@ jobs: RUN_BY_HASH_NUM=${{matrix.batch}} RUN_BY_HASH_TOTAL=${{inputs.batches}} EOF + - name: Run test run: ${{inputs.run_command}} + - name: Clean if: always() uses: ./.github/actions/clean diff --git a/.github/workflows/tags_stable.yml b/.github/workflows/tags_stable.yml deleted file mode 100644 index 
0a3945829ca5..000000000000 --- a/.github/workflows/tags_stable.yml +++ /dev/null @@ -1,73 +0,0 @@ -name: TagsStableWorkflow -# - Gets artifacts from S3 -# - Sends it to JFROG Artifactory -# - Adds them to the release assets - -env: - # Force the stdout and stderr streams to be unbuffered - PYTHONUNBUFFERED: 1 - -on: # yamllint disable-line rule:truthy - push: - tags: - - 'v*-prestable' - - 'v*-stable' - - 'v*-lts' - workflow_dispatch: - inputs: - tag: - description: 'Test tag' - required: true - type: string - - -jobs: - UpdateVersions: - runs-on: [self-hosted, style-checker] - steps: - - name: Set test tag - if: github.event_name == 'workflow_dispatch' - run: | - echo "GITHUB_TAG=${{ github.event.inputs.tag }}" >> "$GITHUB_ENV" - - name: Get tag name - if: github.event_name != 'workflow_dispatch' - run: | - echo "GITHUB_TAG=${GITHUB_REF#refs/tags/}" >> "$GITHUB_ENV" - - name: Check out repository code - uses: ClickHouse/checkout@v1 - with: - ref: master - fetch-depth: 0 - filter: tree:0 - - name: Update versions, docker version, changelog, security - env: - GITHUB_TOKEN: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} - run: | - ./utils/list-versions/list-versions.sh > ./utils/list-versions/version_date.tsv - ./utils/list-versions/update-docker-version.sh - GID=$(id -g "${UID}") - docker run -u "${UID}:${GID}" -e PYTHONUNBUFFERED=1 \ - --volume="${GITHUB_WORKSPACE}:/ClickHouse" clickhouse/style-test \ - /ClickHouse/utils/changelog/changelog.py -v --debug-helpers \ - --gh-user-or-token="$GITHUB_TOKEN" --jobs=5 \ - --output="/ClickHouse/docs/changelogs/${GITHUB_TAG}.md" "${GITHUB_TAG}" - git add "./docs/changelogs/${GITHUB_TAG}.md" - python3 ./utils/security-generator/generate_security.py > SECURITY.md - git diff HEAD - - name: Create Pull Request - uses: peter-evans/create-pull-request@v3 - with: - author: "robot-clickhouse " - token: ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} - committer: "robot-clickhouse " - commit-message: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }} - branch: auto/${{ env.GITHUB_TAG }} - assignees: ${{ github.event.sender.login }} # assign the PR to the tag pusher - delete-branch: true - title: Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }} - labels: do not test - body: | - Update version_date.tsv and changelogs after ${{ env.GITHUB_TAG }} - - ### Changelog category (leave one): - - Not for changelog (changelog entry is not required) diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index 3bd64cf1d74b..49f999660a6f 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -7,6 +7,10 @@ SET(VERSION_MAJOR 23) SET(VERSION_MINOR 8) SET(VERSION_PATCH 8) SET(VERSION_GITHASH 812b95e14ba8cf744bf1d70e6de607cf130a79fa) -SET(VERSION_DESCRIBE v23.8.8.1-lts) -SET(VERSION_STRING 23.8.8.1) + +SET(VERSION_TWEAK 21) +SET(VERSION_FLAVOUR altinitystable) + +SET(VERSION_DESCRIBE v23.8.8.21.altinitystable) +SET(VERSION_STRING 23.8.8.21.altinitystable) # end of autochange diff --git a/cmake/version.cmake b/cmake/version.cmake index 9ca21556f4d4..06fb783b88f2 100644 --- a/cmake/version.cmake +++ b/cmake/version.cmake @@ -19,5 +19,5 @@ set (VERSION_STRING_SHORT "${VERSION_MAJOR}.${VERSION_MINOR}") math (EXPR VERSION_INTEGER "${VERSION_PATCH} + ${VERSION_MINOR}*1000 + ${VERSION_MAJOR}*1000000") if(CLICKHOUSE_OFFICIAL_BUILD) - set(VERSION_OFFICIAL " (official build)") + set(VERSION_OFFICIAL " (altinity build)") endif() diff --git a/docker/images.json b/docker/images.json index 
d895e2da2f03..17fcda59c017 100644 --- a/docker/images.json +++ b/docker/images.json @@ -1,32 +1,30 @@ { "docker/packager/binary": { - "name": "clickhouse/binary-builder", - "dependent": [ - "docker/test/codebrowser" - ] + "name": "altinityinfra/binary-builder", + "dependent": [] }, "docker/test/compatibility/centos": { - "name": "clickhouse/test-old-centos", + "name": "altinityinfra/test-old-centos", "dependent": [] }, "docker/test/compatibility/ubuntu": { - "name": "clickhouse/test-old-ubuntu", + "name": "altinityinfra/test-old-ubuntu", "dependent": [] }, "docker/test/integration/base": { - "name": "clickhouse/integration-test", + "name": "altinityinfra/integration-test", "dependent": [] }, "docker/test/fuzzer": { - "name": "clickhouse/fuzzer", + "name": "altinityinfra/fuzzer", "dependent": [] }, "docker/test/performance-comparison": { - "name": "clickhouse/performance-comparison", + "name": "altinityinfra/performance-comparison", "dependent": [] }, "docker/test/util": { - "name": "clickhouse/test-util", + "name": "altinityinfra/test-util", "dependent": [ "docker/packager/binary", "docker/test/base", @@ -34,140 +32,129 @@ ] }, "docker/test/stateless": { - "name": "clickhouse/stateless-test", + "name": "altinityinfra/stateless-test", "dependent": [ "docker/test/stateful", "docker/test/unit" ] }, "docker/test/stateful": { - "name": "clickhouse/stateful-test", + "name": "altinityinfra/stateful-test", "dependent": [ "docker/test/stress", "docker/test/upgrade" ] }, "docker/test/unit": { - "name": "clickhouse/unit-test", + "name": "altinityinfra/unit-test", "dependent": [] }, "docker/test/stress": { - "name": "clickhouse/stress-test", + "name": "altinityinfra/stress-test", "dependent": [] }, "docker/test/upgrade": { - "name": "clickhouse/upgrade-check", - "dependent": [] - }, - "docker/test/codebrowser": { - "name": "clickhouse/codebrowser", + "name": "altinityinfra/upgrade-check", "dependent": [] }, "docker/test/integration/runner": { "only_amd64": true, - "name": "clickhouse/integration-tests-runner", + "name": "altinityinfra/integration-tests-runner", "dependent": [] }, "docker/test/fasttest": { - "name": "clickhouse/fasttest", + "name": "altinityinfra/fasttest", "dependent": [] }, "docker/test/style": { - "name": "clickhouse/style-test", + "name": "altinityinfra/style-test", "dependent": [] }, "docker/test/integration/s3_proxy": { - "name": "clickhouse/s3-proxy", + "only_amd64": true, + "name": "altinityinfra/s3-proxy", "dependent": [] }, "docker/test/integration/resolver": { - "name": "clickhouse/python-bottle", + "only_amd64": true, + "name": "altinityinfra/python-bottle", "dependent": [] }, "docker/test/integration/helper_container": { - "name": "clickhouse/integration-helper", + "only_amd64": true, + "name": "altinityinfra/integration-helper", "dependent": [] }, "docker/test/integration/mysql_golang_client": { - "name": "clickhouse/mysql-golang-client", + "only_amd64": true, + "name": "altinityinfra/mysql-golang-client", "dependent": [] }, "docker/test/integration/dotnet_client": { - "name": "clickhouse/dotnet-client", + "only_amd64": true, + "name": "altinityinfra/dotnet-client", "dependent": [] }, "docker/test/integration/mysql_java_client": { - "name": "clickhouse/mysql-java-client", + "only_amd64": true, + "name": "altinityinfra/mysql-java-client", "dependent": [] }, "docker/test/integration/mysql_js_client": { - "name": "clickhouse/mysql-js-client", + "only_amd64": true, + "name": "altinityinfra/mysql-js-client", "dependent": [] }, "docker/test/integration/mysql_php_client": { - 
"name": "clickhouse/mysql-php-client", + "only_amd64": true, + "name": "altinityinfra/mysql-php-client", "dependent": [] }, "docker/test/integration/postgresql_java_client": { - "name": "clickhouse/postgresql-java-client", + "only_amd64": true, + "name": "altinityinfra/postgresql-java-client", "dependent": [] }, "docker/test/integration/kerberos_kdc": { "only_amd64": true, - "name": "clickhouse/kerberos-kdc", + "name": "altinityinfra/kerberos-kdc", "dependent": [] }, "docker/test/base": { - "name": "clickhouse/test-base", - "dependent": [ - "docker/test/fuzzer", - "docker/test/integration/base", - "docker/test/keeper-jepsen", - "docker/test/server-jepsen", - "docker/test/sqllogic", - "docker/test/sqltest", - "docker/test/stateless" - ] + "name": "altinityinfra/test-base", + "dependent": [ + "docker/test/stateless", + "docker/test/integration/base" + ] }, "docker/test/integration/kerberized_hadoop": { "only_amd64": true, - "name": "clickhouse/kerberized-hadoop", - "dependent": [] - }, - "docker/test/sqlancer": { - "name": "clickhouse/sqlancer-test", - "dependent": [] - }, - "docker/test/keeper-jepsen": { - "name": "clickhouse/keeper-jepsen-test", + "name": "altinityinfra/kerberized-hadoop", "dependent": [] }, "docker/test/server-jepsen": { - "name": "clickhouse/server-jepsen-test", + "name": "altinityinfra/server-jepsen-test", "dependent": [] }, "docker/test/install/deb": { - "name": "clickhouse/install-deb-test", + "name": "altinityinfra/install-deb-test", "dependent": [] }, "docker/test/install/rpm": { - "name": "clickhouse/install-rpm-test", - "dependent": [] - }, - "docker/docs/builder": { - "name": "clickhouse/docs-builder", + "name": "altinityinfra/install-rpm-test", "dependent": [] }, "docker/test/sqllogic": { - "name": "clickhouse/sqllogic-test", + "name": "altinityinfra/sqllogic-test", "dependent": [] }, "docker/test/sqltest": { - "name": "clickhouse/sqltest", + "name": "altinityinfra/sqltest", "dependent": [] }, "docker/test/integration/nginx_dav": { - "name": "clickhouse/nginx-dav", + "only_amd64": true, + "name": "altinityinfra/nginx-dav", "dependent": [] } } diff --git a/docker/packager/binary/Dockerfile b/docker/packager/binary/Dockerfile index 0a6896a7c6d5..b703fa82665f 100644 --- a/docker/packager/binary/Dockerfile +++ b/docker/packager/binary/Dockerfile @@ -1,7 +1,8 @@ -# docker build -t clickhouse/binary-builder . + +# docker build -t altinityinfra/binary-builder . ARG FROM_TAG=latest -FROM clickhouse/test-util:latest AS cctools -# The cctools are built always from the clickhouse/test-util:latest and cached inline +FROM altinityinfra/test-util:$FROM_TAG as cctools +# The cctools are built always from the altinityinfra/test-util:latest and cached inline # Theoretically, it should improve rebuild speed significantly ENV CC=clang-${LLVM_VERSION} ENV CXX=clang++-${LLVM_VERSION} @@ -37,7 +38,7 @@ RUN git clone https://github.com/tpoechtrager/cctools-port.git \ # END COMPILE # !!!!!!!!!!! 
-FROM clickhouse/test-util:$FROM_TAG +FROM altinityinfra/test-util:$FROM_TAG ENV CC=clang-${LLVM_VERSION} ENV CXX=clang++-${LLVM_VERSION} @@ -58,8 +59,7 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ # NOTE: Seems like gcc-11 is too new for ubuntu20 repository # A cross-linker for RISC-V 64 (we need it, because LLVM's LLD does not work): -RUN add-apt-repository ppa:ubuntu-toolchain-r/test --yes \ - && apt-get update \ +RUN apt-get update \ && apt-get install --yes \ binutils-riscv64-linux-gnu \ build-essential \ diff --git a/docker/packager/binary/build.sh b/docker/packager/binary/build.sh index 8590fcd2851d..4e2a4c123246 100755 --- a/docker/packager/binary/build.sh +++ b/docker/packager/binary/build.sh @@ -27,9 +27,13 @@ fi # export CCACHE_LOGFILE=/build/ccache.log # export CCACHE_DEBUG=1 +# TODO(vnemkov): this might not be needed anymore, but let's keep it for the reference. Maybe remove or un-comment on next build attempt? +# https://stackoverflow.com/a/71940133 +# git config --global --add safe.directory '*' mkdir -p /build/build_docker cd /build/build_docker + rm -f CMakeCache.txt # Read cmake arguments into array (possibly empty) read -ra CMAKE_FLAGS <<< "${CMAKE_FLAGS:-}" diff --git a/docker/packager/packager b/docker/packager/packager index 6b3a3f2bb245..c8025b64fc54 100755 --- a/docker/packager/packager +++ b/docker/packager/packager @@ -10,7 +10,7 @@ from typing import List, Optional SCRIPT_PATH = Path(__file__).absolute() IMAGE_TYPE = "binary" -IMAGE_NAME = f"clickhouse/{IMAGE_TYPE}-builder" +IMAGE_NAME = f"altinityinfra/{IMAGE_TYPE}-builder" class BuildException(Exception): @@ -100,13 +100,13 @@ def run_docker_image_with_env( else: user = f"{os.geteuid()}:{os.getegid()}" - ccache_mount = f"--volume={ccache_dir}:/ccache" + ccache_mount = f" --volume={ccache_dir}:/ccache" if ccache_dir is None: ccache_mount = "" cmd = ( f"docker run --network=host --user={user} --rm {ccache_mount}" - f"--volume={output_dir}:/output --volume={ch_root}:/build {env_part} " + f" --volume={output_dir}:/output --volume={ch_root}:/build {env_part} " f"--volume={cargo_cache_dir}:/rust/cargo/registry {interactive} {image_name}" ) diff --git a/docker/test/base/Dockerfile b/docker/test/base/Dockerfile index b55baa0e0fc3..4388884e9877 100644 --- a/docker/test/base/Dockerfile +++ b/docker/test/base/Dockerfile @@ -1,17 +1,17 @@ # rebuild in #33610 -# docker build -t clickhouse/test-base . +# docker build -t altinityinfra/test-base . ARG FROM_TAG=latest -FROM clickhouse/test-util:$FROM_TAG +FROM altinityinfra/test-util:$FROM_TAG RUN apt-get update \ && apt-get install \ - lcov \ - netbase \ - perl \ - pv \ - ripgrep \ - zstd \ - locales \ + lcov='1.15*' \ + netbase=6.3 \ + perl='5.34.*' \ + pv='1.6.*' \ + ripgrep='13.0.*' \ + zstd='1.4.*' \ + locales='2.35*' \ --yes --no-install-recommends # Sanitizer options for services (clickhouse-server) diff --git a/docker/test/codebrowser/Dockerfile b/docker/test/codebrowser/Dockerfile index 8136fd1fbbcd..f9761315be80 100644 --- a/docker/test/codebrowser/Dockerfile +++ b/docker/test/codebrowser/Dockerfile @@ -2,7 +2,7 @@ # docker build --network=host -t clickhouse/codebrowser . 
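Aside: the docker/packager/packager hunk above moves a single space. The f-string fragments that assemble the docker run command are concatenated directly, so without a leading space the ccache mount and the next --volume flag fuse into one malformed argument. A small shell illustration of the failure mode the patch avoids (paths are made up):

    # Without a separator, two flags become one argument:
    ccache_mount="--volume=/home/user/.ccache:/ccache"
    output_mount="--volume=/tmp/output:/output"
    echo "docker run --rm ${ccache_mount}${output_mount}"
    # -> docker run --rm --volume=/home/user/.ccache:/ccache--volume=/tmp/output:/output   (broken)
    echo "docker run --rm ${ccache_mount} ${output_mount}"
    # -> docker run --rm --volume=/home/user/.ccache:/ccache --volume=/tmp/output:/output  (what the patch emits)
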
# docker run --volume=path_to_repo:/repo_folder --volume=path_to_result:/test_output clickhouse/codebrowser ARG FROM_TAG=latest -FROM clickhouse/binary-builder:$FROM_TAG +FROM altinityinfra/binary-builder:$FROM_TAG # ARG for quick switch to a given ubuntu mirror ARG apt_archive="http://archive.ubuntu.com" diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile index ad24e662a6c9..e6d7fea17179 100644 --- a/docker/test/fasttest/Dockerfile +++ b/docker/test/fasttest/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 # docker build -t clickhouse/fasttest . ARG FROM_TAG=latest -FROM clickhouse/test-util:$FROM_TAG +FROM altinityinfra/test-util:$FROM_TAG RUN apt-get update \ && apt-get install \ diff --git a/docker/test/fuzzer/Dockerfile b/docker/test/fuzzer/Dockerfile index 0bc0fb06633b..745db8769bb4 100644 --- a/docker/test/fuzzer/Dockerfile +++ b/docker/test/fuzzer/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 # docker build -t clickhouse/fuzzer . ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG # ARG for quick switch to a given ubuntu mirror ARG apt_archive="http://archive.ubuntu.com" diff --git a/docker/test/integration/base/Dockerfile b/docker/test/integration/base/Dockerfile index 270b40e23a6d..db7f9a48e766 100644 --- a/docker/test/integration/base/Dockerfile +++ b/docker/test/integration/base/Dockerfile @@ -1,39 +1,39 @@ # rebuild in #33610 # docker build -t clickhouse/integration-test . ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG SHELL ["/bin/bash", "-c"] RUN apt-get update \ && env DEBIAN_FRONTEND=noninteractive apt-get -y install \ - bsdutils \ - curl \ - default-jre \ - g++ \ - gdb \ - iproute2 \ - krb5-user \ - libicu-dev \ - libsqlite3-dev \ - libsqliteodbc \ - lsof \ - lz4 \ - odbc-postgresql \ - odbcinst \ - python3 \ - rpm2cpio \ - sqlite3 \ - tar \ - tzdata \ - unixodbc \ - python3-pip \ - libcurl4-openssl-dev \ - libssl-dev \ + bsdutils='1:2.37.*' \ + curl='7.81.*' \ + default-jre='2:1.11*' \ + g++='4:11*' \ + gdb='12.*' \ + iproute2='5.15.*' \ + krb5-user='1.19.*' \ + libicu-dev='70.*' \ + libsqlite3-dev='3.37.*' \ + libsqliteodbc='0.999*' \ + lsof='4.93.*' \ + lz4='1.9.*' \ + odbc-postgresql='1:13.02.*' \ + odbcinst='2.3.*' \ + python3='3.10.*' \ + rpm2cpio='4.17.*' \ + sqlite3='3.37.*' \ + tar='1.34*' \ + tzdata='2023c-0ubuntu0.22.04.*' \ + unixodbc='2.3.*' \ + python3-pip='22.0.*' \ + libcurl4-openssl-dev='7.81.*' \ + libssl-dev='3.0.*' \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* -RUN pip3 install pycurl +RUN pip3 install pycurl=='7.45.*' # Architecture of the image when BuildKit/buildx is used ARG TARGETARCH diff --git a/docker/test/integration/helper_container/Dockerfile b/docker/test/integration/helper_container/Dockerfile index 60adaea17961..aaff4e872297 100644 --- a/docker/test/integration/helper_container/Dockerfile +++ b/docker/test/integration/helper_container/Dockerfile @@ -1,8 +1,8 @@ # docker build -t clickhouse/integration-helper . 
# Helper docker container to run iptables without sudo -FROM alpine -RUN apk add --no-cache -U iproute2 \ +FROM alpine:3.18 +RUN apk add --no-cache -U iproute2~=6.3 \ && for bin in iptables iptables-restore iptables-save; \ do ln -sf xtables-nft-multi "/sbin/$bin"; \ done diff --git a/docker/test/integration/runner/Dockerfile b/docker/test/integration/runner/Dockerfile index d42fcb9baf67..d25f590bf51b 100644 --- a/docker/test/integration/runner/Dockerfile +++ b/docker/test/integration/runner/Dockerfile @@ -8,35 +8,35 @@ RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list RUN apt-get update \ && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \ - adduser \ - ca-certificates \ - bash \ - btrfs-progs \ - e2fsprogs \ - iptables \ - xfsprogs \ - tar \ - pigz \ - wget \ - git \ - iproute2 \ - cgroupfs-mount \ - python3-pip \ - tzdata \ - libicu-dev \ - bsdutils \ - curl \ - python3-pika \ + adduser='3.118*' \ + ca-certificates='20230311ubuntu0.22.04.*' \ + bash='5.1*' \ + btrfs-progs='5.16.*' \ + e2fsprogs='1.46.*' \ + iptables='1.8.*' \ + xfsprogs='5.13.*' \ + tar='1.34*' \ + pigz='2.6*' \ + wget='1.21.*' \ + git='1:2.34*' \ + iproute2='5.15.*' \ + cgroupfs-mount='1.4*' \ + python3-pip='22.0.*' \ + tzdata='2023c-0ubuntu0.22.04*' \ + libicu-dev='70.1*' \ + bsdutils='1:2.37.*' \ + curl='7.81.*' \ + python3-pika='1.2.*' \ liblua5.1-dev \ - luajit \ - libssl-dev \ - libcurl4-openssl-dev \ - gdb \ - default-jdk \ - software-properties-common \ - libkrb5-dev \ - krb5-user \ - g++ \ + luajit='2.1.*' \ + libssl-dev='3.0.*' \ + libcurl4-openssl-dev='7.81.*' \ + gdb='12.1*' \ + default-jdk='2:1.11*' \ + software-properties-common='0.99.*' \ + libkrb5-dev='1.19.*' \ + krb5-user='1.19.*' \ + g++='4:11.*' \ && rm -rf \ /var/lib/apt/lists/* \ /var/cache/debconf \ @@ -63,46 +63,46 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \ RUN python3 -m pip install --no-cache-dir \ - PyMySQL \ - aerospike==11.1.0 \ - asyncio \ - avro==1.10.2 \ - azure-storage-blob \ - cassandra-driver \ - confluent-kafka==1.9.2 \ - delta-spark==2.3.0 \ - dict2xml \ - dicttoxml \ - docker \ - docker-compose==1.29.2 \ - grpcio \ - grpcio-tools \ - kafka-python \ - kazoo \ - lz4 \ - meilisearch==0.18.3 \ - minio \ - nats-py \ - protobuf \ - psycopg2-binary==2.9.6 \ - pyhdfs \ - pymongo==3.11.0 \ - pyspark==3.3.2 \ - pytest \ - pytest-order==1.0.0 \ - pytest-random \ - pytest-repeat \ - pytest-timeout \ - pytest-xdist \ - pytz \ - pyyaml==5.3.1 \ - redis \ + PyMySQL=='1.1.*' \ + aerospike=='11.1.*' \ + asyncio=='3.4.*'\ + avro=='1.10.*' \ + azure-storage-blob=='12.19.*'\ + cassandra-driver=='3.28.*'\ + confluent-kafka=='1.9.*' \ + delta-spark=='2.3.*' \ + dict2xml=='1.7.*' \ + dicttoxml=='1.7.*' \ + docker=='6.1.*' \ + docker-compose=='1.29.*' \ + grpcio=='1.59.*' \ + grpcio-tools=='1.59.*' \ + kafka-python=='2.0.*' \ + kazoo=='2.9.*' \ + lz4=='4.3.*' \ + meilisearch=='0.18.*' \ + minio=='7.2.*' \ + nats-py=='2.6.*' \ + protobuf=='4.25.*' \ + psycopg2-binary=='2.9.*' \ + pyhdfs=='0.3.*' \ + pymongo=='3.11.*' \ + pyspark=='3.3.*' \ + pytest=='7.4.*' \ + pytest-order=='1.0.*' \ + pytest-random==0.2 \ + pytest-repeat=='0.9.*' \ + pytest-timeout=='2.2.*' \ + pytest-xdist=='3.5.*' \ + pytz=='2023.3.*' \ + pyyaml=='5.3.*' \ + redis=='5.0.*' \ requests-kerberos \ tzlocal==2.1 \ - retry \ - bs4 \ - lxml \ - urllib3 + retry=='0.9.*' \ + bs4=='0.0.*' \ + lxml=='4.9.*' \ + urllib3=='2.1.*' # bs4, lxml are for cloud tests, do not delete # Hudi supports only spark 3.3.*, not 3.4 diff 
--git a/docker/test/integration/runner/compose/docker_compose_clickhouse.yml b/docker/test/integration/runner/compose/docker_compose_clickhouse.yml index fdd124ede91a..ff4523c5b0d7 100644 --- a/docker/test/integration/runner/compose/docker_compose_clickhouse.yml +++ b/docker/test/integration/runner/compose/docker_compose_clickhouse.yml @@ -2,4 +2,4 @@ version: '2.3' # Used to pre-pull images with docker-compose services: clickhouse1: - image: clickhouse/integration-test + image: altinityinfra/integration-test diff --git a/docker/test/integration/runner/compose/docker_compose_dotnet_client.yml b/docker/test/integration/runner/compose/docker_compose_dotnet_client.yml index b63dac51522c..e5746fa209fb 100644 --- a/docker/test/integration/runner/compose/docker_compose_dotnet_client.yml +++ b/docker/test/integration/runner/compose/docker_compose_dotnet_client.yml @@ -1,6 +1,6 @@ version: '2.3' services: dotnet1: - image: clickhouse/dotnet-client:${DOCKER_DOTNET_CLIENT_TAG:-latest} + image: altinityinfra/dotnet-client:${DOCKER_DOTNET_CLIENT_TAG:-latest} # to keep container running command: sleep infinity diff --git a/docker/test/integration/runner/compose/docker_compose_jdbc_bridge.yml b/docker/test/integration/runner/compose/docker_compose_jdbc_bridge.yml index b3686adc21c4..1b02e282a21d 100644 --- a/docker/test/integration/runner/compose/docker_compose_jdbc_bridge.yml +++ b/docker/test/integration/runner/compose/docker_compose_jdbc_bridge.yml @@ -1,6 +1,7 @@ version: '2.3' services: bridge1: + # NOTE(vnemkov): not produced by CI/CD, so must not be replaced with altinityinfra/jdbc-bridge image: clickhouse/jdbc-bridge command: | /bin/bash -c 'cat << EOF > config/datasources/self.json diff --git a/docker/test/integration/runner/compose/docker_compose_keeper.yml b/docker/test/integration/runner/compose/docker_compose_keeper.yml index 91010c4aa83d..fba5bc728f88 100644 --- a/docker/test/integration/runner/compose/docker_compose_keeper.yml +++ b/docker/test/integration/runner/compose/docker_compose_keeper.yml @@ -1,7 +1,7 @@ version: '2.3' services: zoo1: - image: ${image:-clickhouse/integration-test} + image: ${image:-altinityinfra/integration-test} restart: always user: ${user:-} volumes: @@ -37,7 +37,7 @@ services: - inet6 - rotate zoo2: - image: ${image:-clickhouse/integration-test} + image: ${image:-altinityinfra/integration-test} restart: always user: ${user:-} volumes: @@ -73,7 +73,7 @@ services: - inet6 - rotate zoo3: - image: ${image:-clickhouse/integration-test} + image: ${image:-altinityinfra/integration-test} restart: always user: ${user:-} volumes: diff --git a/docker/test/integration/runner/compose/docker_compose_kerberized_hdfs.yml b/docker/test/integration/runner/compose/docker_compose_kerberized_hdfs.yml index e955a14eb3df..58d321177c0d 100644 --- a/docker/test/integration/runner/compose/docker_compose_kerberized_hdfs.yml +++ b/docker/test/integration/runner/compose/docker_compose_kerberized_hdfs.yml @@ -4,7 +4,7 @@ services: kerberizedhdfs1: cap_add: - DAC_READ_SEARCH - image: clickhouse/kerberized-hadoop:${DOCKER_KERBERIZED_HADOOP_TAG:-latest} + image: altinityinfra/kerberized-hadoop:${DOCKER_KERBERIZED_HADOOP_TAG:-latest} hostname: kerberizedhdfs1 restart: always volumes: @@ -24,7 +24,7 @@ services: net.ipv4.ip_local_port_range: '55000 65535' hdfskerberos: - image: clickhouse/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest} + image: altinityinfra/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest} hostname: hdfskerberos volumes: - ${KERBERIZED_HDFS_DIR}/secrets:/tmp/keytab diff --git 
a/docker/test/integration/runner/compose/docker_compose_kerberized_kafka.yml b/docker/test/integration/runner/compose/docker_compose_kerberized_kafka.yml index 49d4c1db90fe..7ae1011b1876 100644 --- a/docker/test/integration/runner/compose/docker_compose_kerberized_kafka.yml +++ b/docker/test/integration/runner/compose/docker_compose_kerberized_kafka.yml @@ -52,7 +52,7 @@ services: net.ipv4.ip_local_port_range: '55000 65535' kafka_kerberos: - image: clickhouse/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest} + image: altinityinfra/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest} hostname: kafka_kerberos volumes: - ${KERBERIZED_KAFKA_DIR}/secrets:/tmp/keytab diff --git a/docker/test/integration/runner/compose/docker_compose_kerberos_kdc.yml b/docker/test/integration/runner/compose/docker_compose_kerberos_kdc.yml index 3ce9a6df1fb6..062bdace6e9c 100644 --- a/docker/test/integration/runner/compose/docker_compose_kerberos_kdc.yml +++ b/docker/test/integration/runner/compose/docker_compose_kerberos_kdc.yml @@ -2,7 +2,7 @@ version: '2.3' services: kerberoskdc: - image: clickhouse/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest} + image: altinityinfra/kerberos-kdc:${DOCKER_KERBEROS_KDC_TAG:-latest} hostname: kerberoskdc volumes: - ${KERBEROS_KDC_DIR}/secrets:/tmp/keytab diff --git a/docker/test/integration/runner/compose/docker_compose_minio.yml b/docker/test/integration/runner/compose/docker_compose_minio.yml index f2979566296f..9b1748654238 100644 --- a/docker/test/integration/runner/compose/docker_compose_minio.yml +++ b/docker/test/integration/runner/compose/docker_compose_minio.yml @@ -21,14 +21,14 @@ services: # HTTP proxies for Minio. proxy1: - image: clickhouse/s3-proxy + image: altinityinfra/s3-proxy expose: - "8080" # Redirect proxy port - "80" # Reverse proxy port - "443" # Reverse proxy port (secure) proxy2: - image: clickhouse/s3-proxy + image: altinityinfra/s3-proxy expose: - "8080" - "80" @@ -36,7 +36,7 @@ services: # Empty container to run proxy resolver. 
resolver: - image: clickhouse/python-bottle + image: altinityinfra/python-bottle expose: - "8080" tty: true diff --git a/docker/test/integration/runner/compose/docker_compose_mysql_golang_client.yml b/docker/test/integration/runner/compose/docker_compose_mysql_golang_client.yml index 56cc04105740..09154b584244 100644 --- a/docker/test/integration/runner/compose/docker_compose_mysql_golang_client.yml +++ b/docker/test/integration/runner/compose/docker_compose_mysql_golang_client.yml @@ -1,6 +1,6 @@ version: '2.3' services: golang1: - image: clickhouse/mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG:-latest} + image: altinityinfra/mysql-golang-client:${DOCKER_MYSQL_GOLANG_CLIENT_TAG:-latest} # to keep container running command: sleep infinity diff --git a/docker/test/integration/runner/compose/docker_compose_mysql_java_client.yml b/docker/test/integration/runner/compose/docker_compose_mysql_java_client.yml index eb5ffb01baa2..a84cef915df2 100644 --- a/docker/test/integration/runner/compose/docker_compose_mysql_java_client.yml +++ b/docker/test/integration/runner/compose/docker_compose_mysql_java_client.yml @@ -1,6 +1,6 @@ version: '2.3' services: java1: - image: clickhouse/mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG:-latest} + image: altinityinfra/mysql-java-client:${DOCKER_MYSQL_JAVA_CLIENT_TAG:-latest} # to keep container running command: sleep infinity diff --git a/docker/test/integration/runner/compose/docker_compose_mysql_js_client.yml b/docker/test/integration/runner/compose/docker_compose_mysql_js_client.yml index 90939449c5f3..b46eb2706c47 100644 --- a/docker/test/integration/runner/compose/docker_compose_mysql_js_client.yml +++ b/docker/test/integration/runner/compose/docker_compose_mysql_js_client.yml @@ -1,6 +1,6 @@ version: '2.3' services: mysqljs1: - image: clickhouse/mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG:-latest} + image: altinityinfra/mysql-js-client:${DOCKER_MYSQL_JS_CLIENT_TAG:-latest} # to keep container running command: sleep infinity diff --git a/docker/test/integration/runner/compose/docker_compose_mysql_php_client.yml b/docker/test/integration/runner/compose/docker_compose_mysql_php_client.yml index 408b8ff089a9..662783a00a1f 100644 --- a/docker/test/integration/runner/compose/docker_compose_mysql_php_client.yml +++ b/docker/test/integration/runner/compose/docker_compose_mysql_php_client.yml @@ -1,6 +1,6 @@ version: '2.3' services: php1: - image: clickhouse/mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG:-latest} + image: altinityinfra/mysql-php-client:${DOCKER_MYSQL_PHP_CLIENT_TAG:-latest} # to keep container running command: sleep infinity diff --git a/docker/test/integration/runner/compose/docker_compose_nginx.yml b/docker/test/integration/runner/compose/docker_compose_nginx.yml index 38d2a6d84c84..9d4403f283fb 100644 --- a/docker/test/integration/runner/compose/docker_compose_nginx.yml +++ b/docker/test/integration/runner/compose/docker_compose_nginx.yml @@ -5,7 +5,7 @@ services: # Files will be put into /usr/share/nginx/files. 
nginx: - image: clickhouse/nginx-dav:${DOCKER_NGINX_DAV_TAG:-latest} + image: altinityinfra/nginx-dav:${DOCKER_NGINX_DAV_TAG:-latest} restart: always ports: - 80:80 diff --git a/docker/test/integration/runner/compose/docker_compose_postgresql_java_client.yml b/docker/test/integration/runner/compose/docker_compose_postgresql_java_client.yml index 904bfffdfd5b..5c8673ae3eeb 100644 --- a/docker/test/integration/runner/compose/docker_compose_postgresql_java_client.yml +++ b/docker/test/integration/runner/compose/docker_compose_postgresql_java_client.yml @@ -1,6 +1,6 @@ version: '2.2' services: java: - image: clickhouse/postgresql-java-client:${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:-latest} + image: altinityinfra/postgresql-java-client:${DOCKER_POSTGRESQL_JAVA_CLIENT_TAG:-latest} # to keep container running command: sleep infinity diff --git a/docker/test/integration/runner/dockerd-entrypoint.sh b/docker/test/integration/runner/dockerd-entrypoint.sh index b05aef76faf8..0f9c1fa8ed9b 100755 --- a/docker/test/integration/runner/dockerd-entrypoint.sh +++ b/docker/test/integration/runner/dockerd-entrypoint.sh @@ -4,12 +4,12 @@ set -e mkdir -p /etc/docker/ echo '{ "ipv6": true, - "fixed-cidr-v6": "fd00::/8", + "fixed-cidr-v6": "2001:db8:1::/64", "ip-forward": true, "log-level": "debug", "storage-driver": "overlay2", - "insecure-registries" : ["dockerhub-proxy.dockerhub-proxy-zone:5000"], - "registry-mirrors" : ["http://dockerhub-proxy.dockerhub-proxy-zone:5000"] + "insecure-registries" : ["65.108.242.32:5000"], + "registry-mirrors" : ["http://65.108.242.32:5000"] }' | dd of=/etc/docker/daemon.json 2>/dev/null if [ -f /sys/fs/cgroup/cgroup.controllers ]; then @@ -38,9 +38,11 @@ while true; do reties=$((reties+1)) if [[ $reties -ge 100 ]]; then # 10 sec max echo "Can't start docker daemon, timeout exceeded." >&2 + cat /ClickHouse/tests/integration/dockerd.log >&2 exit 1; fi - sleep 0.1 + # For whatever reason docker seems to be unable to start in 10 seconds, so effectively increasing the timeout to 30 seconds + sleep 0.3 done set -e diff --git a/docker/test/keeper-jepsen/Dockerfile b/docker/test/keeper-jepsen/Dockerfile index a794e076ec02..b93b07189012 100644 --- a/docker/test/keeper-jepsen/Dockerfile +++ b/docker/test/keeper-jepsen/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 # docker build -t clickhouse/keeper-jepsen-test . ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG ENV DEBIAN_FRONTEND=noninteractive ENV CLOJURE_VERSION=1.10.3.814 diff --git a/docker/test/performance-comparison/Dockerfile b/docker/test/performance-comparison/Dockerfile index d31663f90711..9864cfe6649e 100644 --- a/docker/test/performance-comparison/Dockerfile +++ b/docker/test/performance-comparison/Dockerfile @@ -1,7 +1,7 @@ # docker build -t clickhouse/performance-comparison . ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG RUN apt-get update \ && DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \ diff --git a/docker/test/server-jepsen/Dockerfile b/docker/test/server-jepsen/Dockerfile index a212427b2a1a..8625058e2502 100644 --- a/docker/test/server-jepsen/Dockerfile +++ b/docker/test/server-jepsen/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 # docker build -t clickhouse/server-jepsen-test .
ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG ENV DEBIAN_FRONTEND=noninteractive ENV CLOJURE_VERSION=1.10.3.814 diff --git a/docker/test/stateful/Dockerfile b/docker/test/stateful/Dockerfile index f513735a2d0a..85e2bee4bba9 100644 --- a/docker/test/stateful/Dockerfile +++ b/docker/test/stateful/Dockerfile @@ -1,7 +1,8 @@ # rebuild in #47031 # docker build -t clickhouse/stateful-test . ARG FROM_TAG=latest -FROM clickhouse/stateless-test:$FROM_TAG +# TODO consider replacing clickhouse with altinityinfra dockerhub account +FROM altinityinfra/stateless-test:$FROM_TAG RUN apt-get update -y \ && env DEBIAN_FRONTEND=noninteractive \ @@ -9,6 +10,8 @@ RUN apt-get update -y \ python3-requests \ nodejs \ npm \ + rpm2cpio \ + cpio \ && apt-get clean COPY s3downloader /s3downloader diff --git a/docker/test/stateful/setup_minio.sh b/docker/test/stateful/setup_minio.sh deleted file mode 120000 index 0d539f72cb34..000000000000 --- a/docker/test/stateful/setup_minio.sh +++ /dev/null @@ -1 +0,0 @@ -../stateless/setup_minio.sh \ No newline at end of file diff --git a/docker/test/stateful/setup_minio.sh b/docker/test/stateful/setup_minio.sh new file mode 100755 index 000000000000..c0deb46a9602 --- /dev/null +++ b/docker/test/stateful/setup_minio.sh @@ -0,0 +1,91 @@ +#!/bin/bash + +USAGE='Usage for local run: + +./docker/test/stateless/setup_minio.sh { stateful | stateless } ./tests/ + +' + +set -e -x -a -u + +TEST_TYPE="$1" +shift + +case $TEST_TYPE in + stateless) QUERY_DIR=0_stateless ;; + stateful) QUERY_DIR=1_stateful ;; + *) echo "unknown test type $TEST_TYPE"; echo "${USAGE}"; exit 1 ;; +esac + +ls -lha + +mkdir -p ./minio_data + +if [ ! -f ./minio ]; then + MINIO_SERVER_VERSION=${MINIO_SERVER_VERSION:-2022-01-03T18-22-58Z} + MINIO_CLIENT_VERSION=${MINIO_CLIENT_VERSION:-2022-01-05T23-52-51Z} + case $(uname -m) in + x86_64) BIN_ARCH=amd64 ;; + aarch64) BIN_ARCH=arm64 ;; + *) echo "unknown architecture $(uname -m)"; exit 1 ;; + esac + echo 'MinIO binary not found, downloading...' + + BINARY_TYPE=$(uname -s | tr '[:upper:]' '[:lower:]') + + wget "https://dl.min.io/server/minio/release/${BINARY_TYPE}-${BIN_ARCH}/archive/minio.RELEASE.${MINIO_SERVER_VERSION}" -O ./minio \ + && wget "https://dl.min.io/client/mc/release/${BINARY_TYPE}-${BIN_ARCH}/archive/mc.RELEASE.${MINIO_CLIENT_VERSION}" -O ./mc \ + && chmod +x ./mc ./minio +fi + +MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse} +MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse} + +./minio --version + +./minio server --address ":11111" ./minio_data & + +i=0 +while ! curl -v --silent http://localhost:11111 2>&1 | grep AccessDenied +do + if [[ $i == 60 ]]; then + echo "Failed to setup minio" + exit 0 + fi + echo "Trying to connect to minio" + sleep 1 + i=$((i + 1)) +done + +lsof -i :11111 + +sleep 5 + +./mc alias set clickminio http://localhost:11111 clickhouse clickhouse +./mc admin user add clickminio test testtest +./mc admin policy set clickminio readwrite user=test +./mc mb clickminio/test +if [ "$TEST_TYPE" = "stateless" ]; then + ./mc policy set public clickminio/test +fi + + +# Upload data to Minio. 
By default after unpacking all tests will be in + # /usr/share/clickhouse-test/queries + +TEST_PATH=${1:-/usr/share/clickhouse-test} +MINIO_DATA_PATH=${TEST_PATH}/queries/${QUERY_DIR}/data_minio + +# Iterating over globs will cause redundant FILE variable to be a path to a file, not a filename +# shellcheck disable=SC2045 +for FILE in $(ls "${MINIO_DATA_PATH}"); do + echo "$FILE"; + ./mc cp "${MINIO_DATA_PATH}"/"$FILE" clickminio/test/"$FILE"; +done + +mkdir -p ~/.aws +cat <<EOT >> ~/.aws/credentials +[default] +aws_access_key_id=${MINIO_ROOT_USER} +aws_secret_access_key=${MINIO_ROOT_PASSWORD} +EOT diff --git a/docker/test/stateful/setup_minio.sh~c3e81877ca (Make builds and tests possible in Altinity's infrastructure) b/docker/test/stateful/setup_minio.sh~c3e81877ca (Make builds and tests possible in Altinity's infrastructure) new file mode 100755 index 000000000000..d077dea920c6 --- /dev/null +++ b/docker/test/stateful/setup_minio.sh~c3e81877ca (Make builds and tests possible in Altinity's infrastructure) @@ -0,0 +1,77 @@ +#!/bin/bash + +# TODO: Make this file shared with stateless tests +# +# Usage for local run: +# +# ./docker/test/stateful/setup_minio.sh ./tests/ +# + +set -e -x -a -u + +rpm2cpio ./minio-20220103182258.0.0.x86_64.rpm | cpio -i --make-directories +find -name minio +cp ./usr/local/bin/minio ./ + +ls -lha + +mkdir -p ./minio_data + +if [ ! -f ./minio ]; then + echo 'MinIO binary not found, downloading...' + + BINARY_TYPE=$(uname -s | tr '[:upper:]' '[:lower:]') + + wget "https://dl.min.io/server/minio/release/${BINARY_TYPE}-amd64/minio" \ + && chmod +x ./minio \ + && wget "https://dl.min.io/client/mc/release/${BINARY_TYPE}-amd64/mc" \ + && chmod +x ./mc +fi + +MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse} +MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse} + +./minio --version +./minio server --address ":11111" ./minio_data & + +i=0 +while ! curl -v --silent http://localhost:11111 2>&1 | grep AccessDenied +do + if [[ $i == 60 ]]; then + echo "Failed to setup minio" + exit 0 + fi + echo "Trying to connect to minio" + sleep 1 + i=$((i + 1)) +done + +lsof -i :11111 + +sleep 5 + +./mc alias set clickminio http://localhost:11111 clickhouse clickhouse +./mc admin user add clickminio test testtest +./mc admin policy set clickminio readwrite user=test +./mc mb clickminio/test + + +# Upload data to Minio. By default after unpacking all tests will be in +# /usr/share/clickhouse-test/queries + +TEST_PATH=${1:-/usr/share/clickhouse-test} +MINIO_DATA_PATH=${TEST_PATH}/queries/1_stateful/data_minio + +# Iterating over globs will cause redundant FILE variable to be a path to a file, not a filename +# shellcheck disable=SC2045 +for FILE in $(ls "${MINIO_DATA_PATH}"); do + echo "$FILE"; + ./mc cp "${MINIO_DATA_PATH}"/"$FILE" clickminio/test/"$FILE"; +done + +mkdir -p ~/.aws +cat <<EOT >> ~/.aws/credentials +[default] +aws_access_key_id=clickhouse +aws_secret_access_key=clickhouse +EOT diff --git a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile index 35a6e9c365b9..9cfb55a2e51e 100644 --- a/docker/test/stateless/Dockerfile +++ b/docker/test/stateless/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 # docker build -t clickhouse/stateless-test .
ARG FROM_TAG=latest -FROM clickhouse/test-base:$FROM_TAG +FROM altinityinfra/test-base:$FROM_TAG ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz" @@ -9,43 +9,45 @@ ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/down RUN apt-get update -y \ && env DEBIAN_FRONTEND=noninteractive \ apt-get install --yes --no-install-recommends \ - awscli \ - brotli \ - lz4 \ - expect \ - golang \ - lsof \ - mysql-client=8.0* \ - ncdu \ - netcat-openbsd \ - nodejs \ - npm \ - odbcinst \ - openjdk-11-jre-headless \ - openssl \ - postgresql-client \ - protobuf-compiler \ - python3 \ - python3-lxml \ - python3-pip \ - python3-requests \ - python3-termcolor \ - qemu-user-static \ - sqlite3 \ - sudo \ - tree \ - unixodbc \ - wget \ - rustc \ - cargo \ - zstd \ - file \ - pv \ - zip \ - p7zip-full \ + awscli='1.22.*' \ + brotli='1.0.*' \ + lz4='1.9.*' \ + expect='5.45.*' \ + golang='2:1.18*' \ + lsof='4.93.*' \ + mysql-client='8.0*' \ + ncdu='1.15.*' \ + netcat-openbsd='1.218*' \ + nodejs='12.22.*' \ + npm='8.5.*' \ + odbcinst='2.3.*' \ + openjdk-11-jre-headless='11.0.*' \ + openssl='3.0.*' \ + postgresql-client='14+*' \ + protobuf-compiler='3.12.*' \ + python3='3.10.*' \ + python3-lxml='4.8.*' \ + python3-pip='22.0.*' \ + python3-requests='2.25.*' \ + python3-termcolor='1.1.*' \ + qemu-user-static='1:6.2+*' \ + sqlite3='3.37.*' \ + sudo='1.9.*' \ + tree='2.0.*' \ + unixodbc='2.3.*' \ + wget='1.21.*' \ + rustc='1.70.*' \ + cargo='1.70.*' \ + zstd='1.4.*' \ + file='1:5.41-*' \ + pv='1.6.*' \ + zip='3.0*' \ + p7zip-full='16.02*' \ + rpm2cpio='4.17.*' \ + cpio='2.13+*' \ && apt-get clean -RUN pip3 install numpy scipy pandas Jinja2 +RUN pip3 install numpy=='1.26.*' scipy=='1.11.*' pandas=='2.1.*' Jinja2=='3.1.*' RUN mkdir -p /tmp/clickhouse-odbc-tmp \ && wget -nv -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \ @@ -81,8 +83,8 @@ ENV MINIO_ROOT_USER="clickhouse" ENV MINIO_ROOT_PASSWORD="clickhouse" ENV EXPORT_S3_STORAGE_POLICIES=1 -RUN npm install -g azurite \ - && npm install -g tslib +RUN npm install -g azurite@3.28.0 \ + && npm install -g tslib@2.6.2 COPY run.sh / COPY setup_minio.sh / diff --git a/docker/test/stateless/setup_minio.sh b/docker/test/stateless/setup_minio.sh index c756ce4669da..269c5055ea4f 100755 --- a/docker/test/stateless/setup_minio.sh +++ b/docker/test/stateless/setup_minio.sh @@ -72,6 +72,7 @@ download_minio() { start_minio() { mkdir -p ./minio_data ./minio --version + ./minio server --address ":11111" ./minio_data & wait_for_it lsof -i :11111 diff --git a/docker/test/stateless_pytest/Dockerfile b/docker/test/stateless_pytest/Dockerfile new file mode 100644 index 000000000000..c148b6212417 --- /dev/null +++ b/docker/test/stateless_pytest/Dockerfile @@ -0,0 +1,33 @@ +# rebuild in #33610 +# docker build -t clickhouse/stateless-pytest . 
+ARG FROM_TAG=latest +FROM altinityinfra/test-base:$FROM_TAG + +RUN apt-get update -y && \ + apt-get install -y --no-install-recommends \ + python3-pip \ + python3-setuptools \ + python3-wheel \ + brotli \ + netcat-openbsd \ + postgresql-client \ + zstd + +RUN python3 -m pip install \ + wheel \ + pytest \ + pytest-html \ + pytest-json \ + pytest-randomly \ + pytest-rerunfailures \ + pytest-timeout \ + pytest-xdist \ + pandas \ + numpy \ + scipy + +CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \ + dpkg -i package_folder/clickhouse-common-static-dbg_*.deb; \ + dpkg -i package_folder/clickhouse-server_*.deb; \ + dpkg -i package_folder/clickhouse-client_*.deb; \ + python3 -m pytest /usr/share/clickhouse-test/queries -n $(nproc) --reruns=1 --timeout=600 --json=test_output/report.json --html=test_output/report.html --self-contained-html diff --git a/docker/test/stress/Dockerfile b/docker/test/stress/Dockerfile index eddeb04758ba..69573839f653 100644 --- a/docker/test/stress/Dockerfile +++ b/docker/test/stress/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 # docker build -t clickhouse/stress-test . ARG FROM_TAG=latest -FROM clickhouse/stateful-test:$FROM_TAG +FROM altinityinfra/stateful-test:$FROM_TAG RUN apt-get update -y \ && env DEBIAN_FRONTEND=noninteractive \ diff --git a/docker/test/unit/Dockerfile b/docker/test/unit/Dockerfile index b75bfb6661cc..378341ab8b69 100644 --- a/docker/test/unit/Dockerfile +++ b/docker/test/unit/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 # docker build -t clickhouse/unit-test . ARG FROM_TAG=latest -FROM clickhouse/stateless-test:$FROM_TAG +FROM altinityinfra/stateless-test:$FROM_TAG RUN apt-get install gdb diff --git a/docker/test/upgrade/Dockerfile b/docker/test/upgrade/Dockerfile index 9152230af1cf..87fff020aecc 100644 --- a/docker/test/upgrade/Dockerfile +++ b/docker/test/upgrade/Dockerfile @@ -1,7 +1,7 @@ # rebuild in #33610 # docker build -t clickhouse/upgrade-check . 
ARG FROM_TAG=latest -FROM clickhouse/stateful-test:$FROM_TAG +FROM altinityinfra/stateful-test:$FROM_TAG RUN apt-get update -y \ && env DEBIAN_FRONTEND=noninteractive \ diff --git a/docker/test/util/Dockerfile b/docker/test/util/Dockerfile index 359041eed032..e92e729b97c4 100644 --- a/docker/test/util/Dockerfile +++ b/docker/test/util/Dockerfile @@ -10,15 +10,15 @@ ENV DEBIAN_FRONTEND=noninteractive LLVM_VERSION=16 RUN apt-get update \ && apt-get install \ - apt-transport-https \ - apt-utils \ - ca-certificates \ - curl \ - dnsutils \ - gnupg \ - iputils-ping \ - lsb-release \ - wget \ + apt-transport-https='2.4.*' \ + apt-utils='2.4.*' \ + ca-certificates='20230311ubuntu0.22.04.*' \ + curl='7.81.*' \ + dnsutils='1:9.18.18-0ubuntu0.22.04.*' \ + gnupg='2.2.*' \ + iputils-ping='3:20211215*' \ + lsb-release='11.1.*' \ + wget='1.21.*' \ --yes --no-install-recommends --verbose-versions \ && export LLVM_PUBKEY_HASH="bda960a8da687a275a2078d43c111d66b1c6a893a3275271beedf266c1ff4a0cdecb429c7a5cccf9f486ea7aa43fd27f" \ && wget -nv -O /tmp/llvm-snapshot.gpg.key https://apt.llvm.org/llvm-snapshot.gpg.key \ @@ -38,27 +38,27 @@ RUN curl -s https://apt.kitware.com/keys/kitware-archive-latest.asc | \ # initial packages RUN apt-get update \ && apt-get install \ - bash \ - bsdmainutils \ - build-essential \ + bash='5.1*' \ + bsdmainutils='12.1.*' \ + build-essential='12.9*' \ clang-${LLVM_VERSION} \ clang-tidy-${LLVM_VERSION} \ - cmake \ - gdb \ - git \ - gperf \ + cmake='3.*' \ + gdb='12.1*' \ + git='1:2.34.*' \ + gperf='3.1*' \ libclang-rt-${LLVM_VERSION}-dev \ lld-${LLVM_VERSION} \ llvm-${LLVM_VERSION} \ llvm-${LLVM_VERSION}-dev \ libclang-${LLVM_VERSION}-dev \ - moreutils \ - nasm \ - ninja-build \ - pigz \ - rename \ - software-properties-common \ - tzdata \ + moreutils='0.66*' \ + nasm='2.15.*' \ + ninja-build='1.10.*' \ + pigz='2.6*' \ + rename='1.30*' \ + software-properties-common='0.99.*' \ + tzdata='2023c-0ubuntu0.22.04.*' \ --yes --no-install-recommends \ && apt-get clean diff --git a/docs/en/engines/table-engines/mergetree-family/mergetree.md b/docs/en/engines/table-engines/mergetree-family/mergetree.md index 4f5061266827..26454957ce48 100644 --- a/docs/en/engines/table-engines/mergetree-family/mergetree.md +++ b/docs/en/engines/table-engines/mergetree-family/mergetree.md @@ -866,6 +866,7 @@ Tags: - `prefer_not_to_merge` — Disables merging of data parts on this volume. When this setting is enabled, merging data on this volume is not allowed. This allows controlling how ClickHouse works with slow disks. - `perform_ttl_move_on_insert` — Disables TTL move on data part INSERT. By default (if enabled) if we insert a data part that already expired by the TTL move rule it immediately goes to a volume/disk declared in move rule. This can significantly slowdown insert in case if destination volume/disk is slow (e.g. S3). If disabled then already expired data part is written into a default volume and then right after moved to TTL volume. - `load_balancing` - Policy for disk balancing, `round_robin` or `least_used`. +- `least_used_ttl_ms` - Timeout (in milliseconds) for refreshing the available space on all disks (`0` - refresh always, `-1` - never refresh, default is `60000`). Note: if the disk is used by ClickHouse only and is not subject to an online filesystem resize/shrink, you can use `-1`; in all other cases this is not recommended, since it will eventually lead to incorrect space distribution.
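As an illustration of the documentation hunk above, here is a minimal sketch of where the new `least_used_ttl_ms` tag would sit in a volume definition, complementing the configuration examples that follow. The policy, volume, and disk names are placeholders (not part of this patch), and the disks are assumed to be declared elsewhere under `<disks>`:

```xml
<storage_configuration>
    <policies>
        <jbod_policy> <!-- placeholder policy name -->
            <volumes>
                <jbod_volume> <!-- placeholder volume name -->
                    <disk>jbod1</disk>
                    <disk>jbod2</disk>
                    <load_balancing>least_used</load_balancing>
                    <!-- re-rank disks by free space at most once per minute;
                         0 refreshes on every call, -1 never refreshes -->
                    <least_used_ttl_ms>60000</least_used_ttl_ms>
                </jbod_volume>
            </volumes>
        </jbod_policy>
    </policies>
</storage_configuration>
```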
Configuration examples: diff --git a/packages/clickhouse-client.yaml b/packages/clickhouse-client.yaml index 4d707b28ad90..059562835d8c 100644 --- a/packages/clickhouse-client.yaml +++ b/packages/clickhouse-client.yaml @@ -11,8 +11,8 @@ description: | arch: "${DEB_ARCH}" # amd64, arm64 platform: "linux" version: "${CLICKHOUSE_VERSION_STRING}" -vendor: "ClickHouse Inc." -homepage: "https://clickhouse.com" +vendor: "Altinity Inc." +homepage: "https://altinity.com/" license: "Apache" section: "database" priority: "optional" diff --git a/packages/clickhouse-common-static-dbg.yaml b/packages/clickhouse-common-static-dbg.yaml index 96de4c17d88f..63b95b034944 100644 --- a/packages/clickhouse-common-static-dbg.yaml +++ b/packages/clickhouse-common-static-dbg.yaml @@ -11,8 +11,8 @@ description: | arch: "${DEB_ARCH}" # amd64, arm64 platform: "linux" version: "${CLICKHOUSE_VERSION_STRING}" -vendor: "ClickHouse Inc." -homepage: "https://clickhouse.com" +vendor: "Altinity Inc." +homepage: "https://altinity.com/" license: "Apache" section: "database" priority: "optional" diff --git a/packages/clickhouse-common-static.yaml b/packages/clickhouse-common-static.yaml index 95532726d945..96dd2d890a19 100644 --- a/packages/clickhouse-common-static.yaml +++ b/packages/clickhouse-common-static.yaml @@ -11,8 +11,8 @@ description: | arch: "${DEB_ARCH}" # amd64, arm64 platform: "linux" version: "${CLICKHOUSE_VERSION_STRING}" -vendor: "ClickHouse Inc." -homepage: "https://clickhouse.com" +vendor: "Altinity Inc." +homepage: "https://altinity.com/" license: "Apache" section: "database" priority: "optional" @@ -34,8 +34,9 @@ suggests: contents: - src: root/usr/bin/clickhouse dst: /usr/bin/clickhouse -- src: root/usr/bin/clickhouse-diagnostics - dst: /usr/bin/clickhouse-diagnostics +# Excluded due to CVEs in the Go runtime that pop up constantly +# - src: root/usr/bin/clickhouse-diagnostics +# dst: /usr/bin/clickhouse-diagnostics - src: root/usr/bin/clickhouse-extract-from-config dst: /usr/bin/clickhouse-extract-from-config - src: root/usr/bin/clickhouse-library-bridge diff --git a/packages/clickhouse-keeper-dbg.yaml b/packages/clickhouse-keeper-dbg.yaml index 28d53b39518d..c1c8a178ba74 100644 --- a/packages/clickhouse-keeper-dbg.yaml +++ b/packages/clickhouse-keeper-dbg.yaml @@ -11,8 +11,8 @@ description: | arch: "${DEB_ARCH}" # amd64, arm64 platform: "linux" version: "${CLICKHOUSE_VERSION_STRING}" -vendor: "ClickHouse Inc." -homepage: "https://clickhouse.com" +vendor: "Altinity Inc." +homepage: "https://altinity.com/" license: "Apache" section: "database" priority: "optional" diff --git a/packages/clickhouse-keeper.yaml b/packages/clickhouse-keeper.yaml index 9dad5382c082..f9780cd4ad9c 100644 --- a/packages/clickhouse-keeper.yaml +++ b/packages/clickhouse-keeper.yaml @@ -11,8 +11,8 @@ description: | arch: "${DEB_ARCH}" # amd64, arm64 platform: "linux" version: "${CLICKHOUSE_VERSION_STRING}" -vendor: "ClickHouse Inc." -homepage: "https://clickhouse.com" +vendor: "Altinity Inc." +homepage: "https://altinity.com/" license: "Apache" section: "database" priority: "optional" diff --git a/packages/clickhouse-server.yaml b/packages/clickhouse-server.yaml index 5e2bc7c74125..9a004c3eb1c6 100644 --- a/packages/clickhouse-server.yaml +++ b/packages/clickhouse-server.yaml @@ -11,8 +11,8 @@ description: | arch: "${DEB_ARCH}" # amd64, arm64 platform: "linux" version: "${CLICKHOUSE_VERSION_STRING}" -vendor: "ClickHouse Inc." -homepage: "https://clickhouse.com" +vendor: "Altinity Inc."
+homepage: "https://altinity.com/" license: "Apache" section: "database" priority: "optional" diff --git a/programs/diagnostics/internal/collectors/system/system_test.go b/programs/diagnostics/internal/collectors/system/system_test.go index fb1e16bd1ed3..70e79bfc905f 100644 --- a/programs/diagnostics/internal/collectors/system/system_test.go +++ b/programs/diagnostics/internal/collectors/system/system_test.go @@ -55,21 +55,21 @@ func TestSystemCollect(t *testing.T) { memoryUsageFrames, err := countFrameRows(diagSet, "memory_usage") require.Greater(t, memoryUsageFrames, 0) require.Nil(t, err) - // cpu - require.Equal(t, []string{"processor", "vendor", "model", "core", "numThreads", "logical", "capabilities"}, diagSet.Frames["cpu"].Columns()) - cpuFrames, err := countFrameRows(diagSet, "cpu") - require.Greater(t, cpuFrames, 0) - require.Nil(t, err) - // processes - require.Equal(t, []string{"pid", "ppid", "stime", "time", "rss", "size", "faults", "minorFaults", "majorFaults", "user", "state", "priority", "nice", "command"}, diagSet.Frames["processes"].Columns()) - processesFrames, err := countFrameRows(diagSet, "processes") - require.Greater(t, processesFrames, 0) - require.Nil(t, err) - // os - require.Equal(t, []string{"hostname", "os", "goOs", "cpus", "core", "kernel", "platform"}, diagSet.Frames["os"].Columns()) - osFrames, err := countFrameRows(diagSet, "os") - require.Greater(t, osFrames, 0) - require.Nil(t, err) + // // cpu + // require.Equal(t, []string{"processor", "vendor", "model", "core", "numThreads", "logical", "capabilities"}, diagSet.Frames["cpu"].Columns()) + // cpuFrames, err := countFrameRows(diagSet, "cpu") + // require.Greater(t, cpuFrames, 0) + // require.Nil(t, err) + // // processes + // require.Equal(t, []string{"pid", "ppid", "stime", "time", "rss", "size", "faults", "minorFaults", "majorFaults", "user", "state", "priority", "nice", "command"}, diagSet.Frames["processes"].Columns()) + // processesFrames, err := countFrameRows(diagSet, "processes") + // require.Greater(t, processesFrames, 0) + // require.Nil(t, err) + // // os + // require.Equal(t, []string{"hostname", "os", "goOs", "cpus", "core", "kernel", "platform"}, diagSet.Frames["os"].Columns()) + // osFrames, err := countFrameRows(diagSet, "os") + // require.Greater(t, osFrames, 0) + // require.Nil(t, err) }) } diff --git a/programs/diagnostics/internal/platform/database/native_test.go b/programs/diagnostics/internal/platform/database/native_test.go index 7028a4b4800b..8f47824fc49c 100644 --- a/programs/diagnostics/internal/platform/database/native_test.go +++ b/programs/diagnostics/internal/platform/database/native_test.go @@ -28,7 +28,7 @@ func createClickHouseContainer(t *testing.T, ctx context.Context) (testcontainer // for now, we test against a hardcoded database-server version but we should make this a property req := testcontainers.ContainerRequest{ - Image: fmt.Sprintf("clickhouse/clickhouse-server:%s", test.GetClickHouseTestVersion()), + Image: fmt.Sprintf("altinityinfra/clickhouse-server:%s", test.GetClickHouseTestVersion()), ExposedPorts: []string{"9000/tcp"}, WaitingFor: wait.ForLog("Ready for connections"), Mounts: testcontainers.ContainerMounts{ @@ -107,7 +107,7 @@ func TestReadTable(t *testing.T) { require.Nil(t, err) require.True(t, ok) require.Equal(t, "default", values[0]) - require.Equal(t, "/var/lib/clickhouse/", values[1]) + require.Equal(t, "/var/lib/altinityinfra/", values[1]) require.Greater(t, values[2], uint64(0)) require.Greater(t, values[3], uint64(0)) require.Greater(t, 
values[4], uint64(0)) @@ -134,10 +134,10 @@ func TestReadTable(t *testing.T) { frame, err := clickhouseClient.ReadTable("system", "databases", []string{}, data.OrderBy{}, 10) require.Nil(t, err) require.ElementsMatch(t, frame.Columns(), [6]string{"name", "engine", "data_path", "metadata_path", "uuid", "comment"}) - expectedRows := [4][3]string{{"INFORMATION_SCHEMA", "Memory", "/var/lib/clickhouse/"}, - {"default", "Atomic", "/var/lib/clickhouse/store/"}, - {"information_schema", "Memory", "/var/lib/clickhouse/"}, - {"system", "Atomic", "/var/lib/clickhouse/store/"}} + expectedRows := [4][3]string{{"INFORMATION_SCHEMA", "Memory", "/var/lib/altinityinfra/"}, + {"default", "Atomic", "/var/lib/altinityinfra/store/"}, + {"information_schema", "Memory", "/var/lib/altinityinfra/"}, + {"system", "Atomic", "/var/lib/altinityinfra/store/"}} i := 0 for { values, ok, err := frame.Next() @@ -181,7 +181,7 @@ func TestReadTable(t *testing.T) { frame, err := clickhouseClient.ReadTable("system", "databases", []string{}, data.OrderBy{}, 1) require.Nil(t, err) require.ElementsMatch(t, frame.Columns(), [6]string{"name", "engine", "data_path", "metadata_path", "uuid", "comment"}) - expectedRows := [1][3]string{{"INFORMATION_SCHEMA", "Memory", "/var/lib/clickhouse/"}} + expectedRows := [1][3]string{{"INFORMATION_SCHEMA", "Memory", "/var/lib/altinityinfra/"}} i := 0 for { values, ok, err := frame.Next() @@ -216,10 +216,10 @@ func TestReadTable(t *testing.T) { require.Nil(t, err) require.ElementsMatch(t, frame.Columns(), [6]string{"name", "engine", "data_path", "metadata_path", "uuid", "comment"}) expectedRows := [4][3]string{ - {"default", "Atomic", "/var/lib/clickhouse/store/"}, - {"system", "Atomic", "/var/lib/clickhouse/store/"}, - {"INFORMATION_SCHEMA", "Memory", "/var/lib/clickhouse/"}, - {"information_schema", "Memory", "/var/lib/clickhouse/"}, + {"default", "Atomic", "/var/lib/altinityinfra/store/"}, + {"system", "Atomic", "/var/lib/altinityinfra/store/"}, + {"INFORMATION_SCHEMA", "Memory", "/var/lib/altinityinfra/"}, + {"information_schema", "Memory", "/var/lib/altinityinfra/"}, } i := 0 for { @@ -256,7 +256,7 @@ func TestExecuteStatement(t *testing.T) { require.Nil(t, err) require.ElementsMatch(t, frame.Columns(), [2]string{"path", "count"}) expectedRows := [1][2]interface{}{ - {"/var/lib/clickhouse/", uint64(1)}, + {"/var/lib/altinityinfra/", uint64(1)}, } i := 0 for { diff --git a/programs/diagnostics/internal/platform/manager_test.go b/programs/diagnostics/internal/platform/manager_test.go index e6c50c6e505a..980b461626a7 100644 --- a/programs/diagnostics/internal/platform/manager_test.go +++ b/programs/diagnostics/internal/platform/manager_test.go @@ -26,7 +26,7 @@ func createClickHouseContainer(t *testing.T, ctx context.Context) (testcontainer } // for now, we test against a hardcoded database-server version but we should make this a property req := testcontainers.ContainerRequest{ - Image: fmt.Sprintf("clickhouse/clickhouse-server:%s", test.GetClickHouseTestVersion()), + Image: fmt.Sprintf("altinityinfra/clickhouse-server:%s", test.GetClickHouseTestVersion()), ExposedPorts: []string{"9000/tcp"}, WaitingFor: wait.ForLog("Ready for connections"), Mounts: testcontainers.ContainerMounts{ diff --git a/programs/diagnostics/internal/platform/utils/process_test.go b/programs/diagnostics/internal/platform/utils/process_test.go index 9baaa5597522..41118576bd84 100644 --- a/programs/diagnostics/internal/platform/utils/process_test.go +++ b/programs/diagnostics/internal/platform/utils/process_test.go @@ -50,7 
+50,7 @@ func TestFindClickHouseProcessesAndConfigs(t *testing.T) { // run a ClickHouse container that guarantees that it runs only for the duration of the test req := testcontainers.ContainerRequest{ - Image: fmt.Sprintf("clickhouse/clickhouse-server:%s", test.GetClickHouseTestVersion()), + Image: fmt.Sprintf("altinityinfra/clickhouse-server:%s", test.GetClickHouseTestVersion()), ExposedPorts: []string{"9000/tcp"}, WaitingFor: wait.ForLog("Ready for connections"), Mounts: testcontainers.ContainerMounts{ diff --git a/programs/diagnostics/internal/runner_test.go b/programs/diagnostics/internal/runner_test.go index 2369f8b3007d..17fa2f818401 100644 --- a/programs/diagnostics/internal/runner_test.go +++ b/programs/diagnostics/internal/runner_test.go @@ -36,7 +36,7 @@ func TestCapture(t *testing.T) { } // for now, we test against a hardcoded database-server version but we should make this a property req := testcontainers.ContainerRequest{ - Image: fmt.Sprintf("clickhouse/clickhouse-server:%s", test.GetClickHouseTestVersion()), + Image: fmt.Sprintf("altinityinfra/clickhouse-server:%s", test.GetClickHouseTestVersion()), ExposedPorts: []string{"9000/tcp"}, WaitingFor: wait.ForLog("Ready for connections"), Mounts: testcontainers.ContainerMounts{ diff --git a/src/Disks/StoragePolicy.cpp b/src/Disks/StoragePolicy.cpp index d66d493bb7ec..33a8719a9752 100644 --- a/src/Disks/StoragePolicy.cpp +++ b/src/Disks/StoragePolicy.cpp @@ -71,7 +71,8 @@ StoragePolicy::StoragePolicy( /* max_data_part_size_= */ 0, /* are_merges_avoided_= */ false, /* perform_ttl_move_on_insert_= */ true, - VolumeLoadBalancing::ROUND_ROBIN); + VolumeLoadBalancing::ROUND_ROBIN, + /* least_used_ttl_ms_= */ 60'000); volumes.emplace_back(std::move(default_volume)); } diff --git a/src/Disks/VolumeJBOD.cpp b/src/Disks/VolumeJBOD.cpp index 519f3378c4ce..682a167bf5f0 100644 --- a/src/Disks/VolumeJBOD.cpp +++ b/src/Disks/VolumeJBOD.cpp @@ -76,6 +76,7 @@ VolumeJBOD::VolumeJBOD( perform_ttl_move_on_insert = config.getBool(config_prefix + ".perform_ttl_move_on_insert", true); are_merges_avoided = config.getBool(config_prefix + ".prefer_not_to_merge", false); + least_used_ttl_ms = config.getUInt64(config_prefix + ".least_used_ttl_ms", 60'000); } VolumeJBOD::VolumeJBOD(const VolumeJBOD & volume_jbod, @@ -101,6 +102,11 @@ DiskPtr VolumeJBOD::getDisk(size_t /* index */) const case VolumeLoadBalancing::LEAST_USED: { std::lock_guard lock(mutex); + if (!least_used_ttl_ms || least_used_update_watch.elapsedMilliseconds() >= least_used_ttl_ms) + { + disks_by_size = LeastUsedDisksQueue(disks.begin(), disks.end()); + least_used_update_watch.restart(); + } return disks_by_size.top().disk; } } @@ -135,11 +141,23 @@ ReservationPtr VolumeJBOD::reserve(UInt64 bytes) { std::lock_guard lock(mutex); - DiskWithSize disk = disks_by_size.top(); - disks_by_size.pop(); + ReservationPtr reservation; + if (!least_used_ttl_ms || least_used_update_watch.elapsedMilliseconds() >= least_used_ttl_ms) + { + disks_by_size = LeastUsedDisksQueue(disks.begin(), disks.end()); + least_used_update_watch.restart(); + + DiskWithSize disk = disks_by_size.top(); + reservation = disk.reserve(bytes); + } + else + { + DiskWithSize disk = disks_by_size.top(); + disks_by_size.pop(); - ReservationPtr reservation = disk.reserve(bytes); - disks_by_size.push(disk); + reservation = disk.reserve(bytes); + disks_by_size.push(disk); + } return reservation; } diff --git a/src/Disks/VolumeJBOD.h b/src/Disks/VolumeJBOD.h index 8d270a6c71c7..b94e167fd862 100644 --- a/src/Disks/VolumeJBOD.h +++ 
b/src/Disks/VolumeJBOD.h @@ -5,6 +5,9 @@ #include #include +#include +#include +#include namespace DB @@ -23,9 +26,10 @@ using VolumesJBOD = std::vector; class VolumeJBOD : public IVolume { public: - VolumeJBOD(String name_, Disks disks_, UInt64 max_data_part_size_, bool are_merges_avoided_, bool perform_ttl_move_on_insert_, VolumeLoadBalancing load_balancing_) + VolumeJBOD(String name_, Disks disks_, UInt64 max_data_part_size_, bool are_merges_avoided_, bool perform_ttl_move_on_insert_, VolumeLoadBalancing load_balancing_, UInt64 least_used_ttl_ms_) : IVolume(name_, disks_, max_data_part_size_, perform_ttl_move_on_insert_, load_balancing_) , are_merges_avoided(are_merges_avoided_) + , least_used_ttl_ms(least_used_ttl_ms_) { } @@ -70,7 +74,7 @@ class VolumeJBOD : public IVolume DiskPtr disk; std::optional free_size = 0; - DiskWithSize(DiskPtr disk_) + explicit DiskWithSize(DiskPtr disk_) : disk(disk_) , free_size(disk->getUnreservedSpace()) {} @@ -97,7 +101,10 @@ class VolumeJBOD : public IVolume /// Index of last used disk, for load_balancing=round_robin mutable std::atomic last_used = 0; /// Priority queue of disks sorted by size, for load_balancing=least_used - mutable std::priority_queue disks_by_size; + using LeastUsedDisksQueue = std::priority_queue; + mutable LeastUsedDisksQueue disks_by_size TSA_GUARDED_BY(mutex); + mutable Stopwatch least_used_update_watch TSA_GUARDED_BY(mutex); + UInt64 least_used_ttl_ms = 0; /// True if parts on this volume participate in merges according to START/STOP MERGES ON VOLUME. std::atomic> are_merges_avoided_user_override{std::nullopt}; diff --git a/src/Processors/QueryPlan/PartsSplitter.cpp b/src/Processors/QueryPlan/PartsSplitter.cpp index 61c6422de5aa..9e8340ef43da 100644 --- a/src/Processors/QueryPlan/PartsSplitter.cpp +++ b/src/Processors/QueryPlan/PartsSplitter.cpp @@ -14,6 +14,7 @@ #include #include #include +#include using namespace DB; @@ -78,7 +79,25 @@ std::pair, std::vector> split(RangesInDat RangeEnd, }; - [[ maybe_unused ]] bool operator<(const PartsRangesIterator & other) const { return std::tie(value, event) > std::tie(other.value, other.event); } + [[maybe_unused]] bool operator<(const PartsRangesIterator & other) const + { + // Accurate comparison of `value > other.value` + for (size_t i = 0; i < value.size(); ++i) + { + if (applyVisitor(FieldVisitorAccurateLess(), value[i], other.value[i])) + return false; + + if (!applyVisitor(FieldVisitorAccurateEquals(), value[i], other.value[i])) + return true; + } + + /// Within the same part we should process events in order of mark numbers, + /// because they already ordered by value and range ends have greater mark numbers than the beginnings. + /// Otherwise we could get invalid ranges with the right bound that is less than the left bound. + const auto ev_mark = event == EventType::RangeStart ? range.begin : range.end; + const auto other_ev_mark = other.event == EventType::RangeStart ? other.range.begin : other.range.end; + return ev_mark > other_ev_mark; + } Values value; MarkRangeWithPartIdx range; diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index c5d8986af26d..0baa9f833e20 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -1148,7 +1148,7 @@ void AlterCommands::validate(const StoragePtr & table, ContextPtr context) const /// So we don't allow to do it for now. 
if (command.data_type) { - const GetColumnsOptions options(GetColumnsOptions::AllPhysical); + const GetColumnsOptions options(GetColumnsOptions::All); const auto old_data_type = all_columns.getColumn(options, column_name).type; bool new_type_has_object = command.data_type->hasDynamicSubcolumns(); diff --git a/src/Storages/Kafka/StorageKafka.cpp b/src/Storages/Kafka/StorageKafka.cpp index 43a3bedfb74b..3b6a712b8423 100644 --- a/src/Storages/Kafka/StorageKafka.cpp +++ b/src/Storages/Kafka/StorageKafka.cpp @@ -660,10 +660,19 @@ void StorageKafka::updateConfiguration(cppkafka::Configuration & kafka_config, if (kafka_consumer_weak_ptr_ptr) { + /// NOTE: statistics should be consumed, otherwise it creates too much + /// entries in the queue, that leads to memory leak and slow shutdown. + /// + /// This is the case when you have kafka table but no SELECT from it or + /// materialized view attached. + /// + /// So for now it is disabled by default, until properly fixed. +#if 0 if (!config.has(config_prefix + "." + "statistics_interval_ms")) { kafka_config.set("statistics.interval.ms", "3000"); // every 3 seconds by default. set to 0 to disable. } +#endif if (kafka_config.get("statistics.interval.ms") != "0") { diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index e10c83e0d458..a5af15766cfa 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -1262,6 +1262,14 @@ bool KeyCondition::tryPrepareSetIndex( if (!future_set) return false; + const auto set_types = future_set->getTypes(); + size_t set_types_size = set_types.size(); + size_t indexes_mapping_size = indexes_mapping.size(); + + /// When doing strict matches, we have to check all elements in set. + if (strict && indexes_mapping_size < set_types_size) + return false; + auto prepared_set = future_set->buildOrderedSetInplace(right_arg.getTreeContext().getQueryContext()); if (!prepared_set) return false; diff --git a/src/Storages/StorageMerge.cpp b/src/Storages/StorageMerge.cpp index 272f35303bd1..7d666cc4937a 100644 --- a/src/Storages/StorageMerge.cpp +++ b/src/Storages/StorageMerge.cpp @@ -1,48 +1,55 @@ -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include -#include #include -#include +#include #include #include -#include -#include +#include #include +#include +#include #include -#include -#include -#include -#include +#include #include -#include -#include #include -#include -#include -#include -#include -#include -#include "DataTypes/IDataType.h" -#include -#include +#include +#include +#include +#include #include #include -#include -#include -#include #include +#include +#include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include -#include - +#include +#include +#include +#include namespace { @@ -398,6 +405,7 @@ ReadFromMerge::ReadFromMerge( , context(std::move(context_)) , common_processed_stage(processed_stage) { + createChildPlans(); } void ReadFromMerge::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) @@ -408,6 +416,65 @@ void ReadFromMerge::initializePipeline(QueryPipelineBuilder & pipeline, const Bu return; } + QueryPlanResourceHolder resources; + std::vector> pipelines; + + chassert(selected_tables.size() == child_plans.size()); + 
chassert(selected_tables.size() == table_aliases.size()); + auto table_it = selected_tables.begin(); + for (size_t i = 0; i < selected_tables.size(); ++i, ++table_it) + { + auto & plan = child_plans.at(i); + const auto & table = *table_it; + + const auto storage = std::get<1>(table); + const auto storage_metadata_snapshot = storage->getInMemoryMetadataPtr(); + const auto nested_storage_snaphsot = storage->getStorageSnapshot(storage_metadata_snapshot, context); + + auto modified_query_info = getModifiedQueryInfo(query_info, context, table, nested_storage_snaphsot); + + auto source_pipeline = createSources( + plan, nested_storage_snaphsot, modified_query_info, common_processed_stage, common_header, table_aliases.at(i), table, context); + + if (source_pipeline && source_pipeline->initialized()) + { + resources.storage_holders.push_back(std::get<1>(table)); + resources.table_locks.push_back(std::get<2>(table)); + + pipelines.emplace_back(std::move(source_pipeline)); + } + } + + if (pipelines.empty()) + { + pipeline.init(Pipe(std::make_shared(output_stream->header))); + return; + } + + pipeline = QueryPipelineBuilder::unitePipelines(std::move(pipelines)); + + if (!query_info.input_order_info) + { + size_t tables_count = selected_tables.size(); + Float64 num_streams_multiplier = std::min( + static_cast(tables_count), + std::max(1UL, static_cast(context->getSettingsRef().max_streams_multiplier_for_merge_tables))); + size_t num_streams = static_cast(requested_num_streams * num_streams_multiplier); + + // It's possible to have many tables read from merge, resize(num_streams) might open too many files at the same time. + // Using narrowPipe instead. But in case of reading in order of primary key, we cannot do it, + // because narrowPipe doesn't preserve order. + pipeline.narrow(num_streams); + } + + pipeline.addResources(std::move(resources)); +} + +void ReadFromMerge::createChildPlans() +{ + if (selected_tables.empty()) + return; + size_t tables_count = selected_tables.size(); Float64 num_streams_multiplier = std::min(static_cast(tables_count), std::max(1UL, static_cast(context->getSettingsRef().max_streams_multiplier_for_merge_tables))); @@ -438,11 +505,6 @@ void ReadFromMerge::initializePipeline(QueryPipelineBuilder & pipeline, const Bu query_info.input_order_info = input_sorting_info; } - auto sample_block = merge_storage_snapshot->getMetadataForQuery()->getSampleBlock(); - - std::vector> pipelines; - QueryPlanResourceHolder resources; - for (const auto & table : selected_tables) { size_t current_need_streams = tables_count >= num_streams ? 
1 : (num_streams / tables_count); @@ -460,7 +522,7 @@ void ReadFromMerge::initializePipeline(QueryPipelineBuilder & pipeline, const Bu if (sampling_requested && !storage->supportsSampling()) throw Exception(ErrorCodes::SAMPLING_NOT_SUPPORTED, "Illegal SAMPLE: table doesn't support sampling"); - Aliases aliases; + auto & aliases = table_aliases.emplace_back(); auto storage_metadata_snapshot = storage->getInMemoryMetadataPtr(); auto nested_storage_snaphsot = storage->getStorageSnapshot(storage_metadata_snapshot, context); @@ -479,6 +541,8 @@ void ReadFromMerge::initializePipeline(QueryPipelineBuilder & pipeline, const Bu ASTPtr required_columns_expr_list = std::make_shared(); ASTPtr column_expr; + auto sample_block = merge_storage_snapshot->getMetadataForQuery()->getSampleBlock(); + for (const auto & column : column_names) { const auto column_default = storage_columns.getDefault(column); @@ -515,42 +579,16 @@ void ReadFromMerge::initializePipeline(QueryPipelineBuilder & pipeline, const Bu } } - auto source_pipeline = createSources( + child_plans.emplace_back(createPlanForTable( nested_storage_snaphsot, modified_query_info, common_processed_stage, required_max_block_size, - common_header, - aliases, table, column_names_as_aliases.empty() ? column_names : column_names_as_aliases, context, - current_streams); - - if (source_pipeline && source_pipeline->initialized()) - { - resources.storage_holders.push_back(std::get<1>(table)); - resources.table_locks.push_back(std::get<2>(table)); - - pipelines.emplace_back(std::move(source_pipeline)); - } + current_streams)); } - - if (pipelines.empty()) - { - pipeline.init(Pipe(std::make_shared(output_stream->header))); - return; - } - - pipeline = QueryPipelineBuilder::unitePipelines(std::move(pipelines)); - - if (!query_info.input_order_info) - // It's possible to have many tables read from merge, resize(num_streams) might open too many files at the same time. - // Using narrowPipe instead. But in case of reading in order of primary key, we cannot do it, - // because narrowPipe doesn't preserve order. - pipeline.narrow(num_streams); - - pipeline.addResources(std::move(resources)); } SelectQueryInfo ReadFromMerge::getModifiedQueryInfo(const SelectQueryInfo & query_info, @@ -616,23 +654,121 @@ SelectQueryInfo ReadFromMerge::getModifiedQueryInfo(const SelectQueryInfo & quer return modified_query_info; } +bool recursivelyApplyToReadingSteps(QueryPlan::Node * node, const std::function & func) +{ + bool ok = true; + for (auto * child : node->children) + ok &= recursivelyApplyToReadingSteps(child, func); + + // This code is mainly meant to be used to call `requestReadingInOrder` on child steps. + // In this case it is ok if one child will read in order and other will not (though I don't know when it is possible), + // the only important part is to acknowledge this at the parent and don't rely on any particular ordering of input data. 
+ if (!ok) + return false; + + if (auto * read_from_merge_tree = typeid_cast(node->step.get())) + ok &= func(*read_from_merge_tree); + + return ok; +} + QueryPipelineBuilderPtr ReadFromMerge::createSources( + QueryPlan & plan, const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & modified_query_info, const QueryProcessingStage::Enum & processed_stage, - const UInt64 max_block_size, const Block & header, const Aliases & aliases, const StorageWithLockAndName & storage_with_lock, + ContextMutablePtr modified_context, + bool concat_streams) const +{ + if (!plan.isInitialized()) + return std::make_unique(); + + QueryPipelineBuilderPtr builder; + + const auto & [database_name, storage, _, table_name] = storage_with_lock; + bool allow_experimental_analyzer = modified_context->getSettingsRef().allow_experimental_analyzer; + auto storage_stage + = storage->getQueryProcessingStage(modified_context, QueryProcessingStage::Complete, storage_snapshot, modified_query_info); + + builder = plan.buildQueryPipeline( + QueryPlanOptimizationSettings::fromContext(modified_context), BuildQueryPipelineSettings::fromContext(modified_context)); + + if (processed_stage > storage_stage || (allow_experimental_analyzer && processed_stage != QueryProcessingStage::FetchColumns)) + { + /** Materialization is needed, since from distributed storage the constants come materialized. + * If you do not do this, different types (Const and non-Const) columns will be produced in different threads, + * And this is not allowed, since all code is based on the assumption that in the block stream all types are the same. + */ + builder->addSimpleTransform([](const Block & stream_header) { return std::make_shared(stream_header); }); + } + + if (builder->initialized()) + { + if (concat_streams && builder->getNumStreams() > 1) + { + // It's possible to have many tables read from merge, resize(1) might open too many files at the same time. + // Using concat instead. + builder->addTransform(std::make_shared(builder->getHeader(), builder->getNumStreams())); + } + + /// Add virtual columns if we don't already have them. 
+ + Block pipe_header = builder->getHeader(); + + if (has_database_virtual_column && !pipe_header.has("_database")) + { + ColumnWithTypeAndName column; + column.name = "_database"; + column.type = std::make_shared(std::make_shared()); + column.column = column.type->createColumnConst(0, Field(database_name)); + + auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); + auto adding_column_actions = std::make_shared( + std::move(adding_column_dag), ExpressionActionsSettings::fromContext(modified_context, CompileExpressions::yes)); + + builder->addSimpleTransform([&](const Block & stream_header) + { return std::make_shared(stream_header, adding_column_actions); }); + } + + if (has_table_virtual_column && !pipe_header.has("_table")) + { + ColumnWithTypeAndName column; + column.name = "_table"; + column.type = std::make_shared(std::make_shared()); + column.column = column.type->createColumnConst(0, Field(table_name)); + + auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); + auto adding_column_actions = std::make_shared( + std::move(adding_column_dag), ExpressionActionsSettings::fromContext(modified_context, CompileExpressions::yes)); + + builder->addSimpleTransform([&](const Block & stream_header) + { return std::make_shared(stream_header, adding_column_actions); }); + } + + /// Subordinary tables could have different but convertible types, like numeric types of different width. + /// We must return streams with structure equals to structure of Merge table. + convertingSourceStream(header, storage_snapshot->metadata, aliases, modified_context, *builder, processed_stage); + } + + return builder; +} + +QueryPlan ReadFromMerge::createPlanForTable( + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & modified_query_info, + const QueryProcessingStage::Enum & processed_stage, + UInt64 max_block_size, + const StorageWithLockAndName & storage_with_lock, Names real_column_names, ContextMutablePtr modified_context, - size_t streams_num, - bool concat_streams) + size_t streams_num) { const auto & [database_name, storage, _, table_name] = storage_with_lock; auto & modified_select = modified_query_info.query->as(); - QueryPipelineBuilderPtr builder; if (!InterpreterSelectQuery::isQueryWithFinal(modified_query_info) && storage->needRewriteQueryWithFinal(real_column_names)) { /// NOTE: It may not work correctly in some cases, because query was analyzed without final. @@ -647,14 +783,14 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( storage_snapshot, modified_query_info); + QueryPlan plan; + if (processed_stage <= storage_stage || (allow_experimental_analyzer && processed_stage == QueryProcessingStage::FetchColumns)) { /// If there are only virtual columns in query, you must request at least one other column. 
if (real_column_names.empty()) real_column_names.push_back(ExpressionActions::getSmallestColumn(storage_snapshot->metadata->getColumns().getAllPhysical()).name); - QueryPlan & plan = child_plans.emplace_back(); - StorageView * view = dynamic_cast(storage.get()); if (!view || allow_experimental_analyzer) { @@ -688,16 +824,7 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( if (!plan.isInitialized()) return {}; - if (auto * read_from_merge_tree = typeid_cast(plan.getRootNode()->step.get())) - { - size_t filters_dags_size = filter_dags.size(); - for (size_t i = 0; i < filters_dags_size; ++i) - read_from_merge_tree->addFilter(filter_dags[i], filter_nodes.nodes[i]); - } - - builder = plan.buildQueryPipeline( - QueryPlanOptimizationSettings::fromContext(modified_context), - BuildQueryPipelineSettings::fromContext(modified_context)); + applyFilters(plan); } else if (processed_stage > storage_stage || (allow_experimental_analyzer && processed_stage != QueryProcessingStage::FetchColumns)) { @@ -705,15 +832,14 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( modified_context->setSetting("max_threads", streams_num); modified_context->setSetting("max_streams_to_max_threads_ratio", 1); - QueryPlan & plan = child_plans.emplace_back(); - if (allow_experimental_analyzer) { InterpreterSelectQueryAnalyzer interpreter(modified_query_info.query_tree, modified_context, SelectQueryOptions(processed_stage).ignoreProjections()); - builder = std::make_unique(interpreter.buildQueryPipeline()); - plan = std::move(interpreter.getPlanner()).extractQueryPlan(); + auto & planner = interpreter.getPlanner(); + planner.buildQueryPlanIfNeeded(); + plan = std::move(planner).extractQueryPlan(); } else { @@ -722,71 +848,11 @@ QueryPipelineBuilderPtr ReadFromMerge::createSources( InterpreterSelectQuery interpreter{modified_query_info.query, modified_context, SelectQueryOptions(processed_stage).ignoreProjections()}; - builder = std::make_unique(interpreter.buildQueryPipeline(plan)); - } - - /** Materialization is needed, since from distributed storage the constants come materialized. - * If you do not do this, different types (Const and non-Const) columns will be produced in different threads, - * And this is not allowed, since all code is based on the assumption that in the block stream all types are the same. - */ - builder->addSimpleTransform([](const Block & stream_header) { return std::make_shared(stream_header); }); - } - - if (builder->initialized()) - { - if (concat_streams && builder->getNumStreams() > 1) - { - // It's possible to have many tables read from merge, resize(1) might open too many files at the same time. - // Using concat instead. - builder->addTransform(std::make_shared(builder->getHeader(), builder->getNumStreams())); - } - - /// Add virtual columns if we don't already have them. 
- - Block pipe_header = builder->getHeader(); - - if (has_database_virtual_column && !pipe_header.has("_database")) - { - ColumnWithTypeAndName column; - column.name = "_database"; - column.type = std::make_shared(std::make_shared()); - column.column = column.type->createColumnConst(0, Field(database_name)); - - auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); - auto adding_column_actions = std::make_shared( - std::move(adding_column_dag), - ExpressionActionsSettings::fromContext(modified_context, CompileExpressions::yes)); - - builder->addSimpleTransform([&](const Block & stream_header) - { - return std::make_shared(stream_header, adding_column_actions); - }); - } - - if (has_table_virtual_column && !pipe_header.has("_table")) - { - ColumnWithTypeAndName column; - column.name = "_table"; - column.type = std::make_shared(std::make_shared()); - column.column = column.type->createColumnConst(0, Field(table_name)); - - auto adding_column_dag = ActionsDAG::makeAddingColumnActions(std::move(column)); - auto adding_column_actions = std::make_shared( - std::move(adding_column_dag), - ExpressionActionsSettings::fromContext(modified_context, CompileExpressions::yes)); - - builder->addSimpleTransform([&](const Block & stream_header) - { - return std::make_shared(stream_header, adding_column_actions); - }); + interpreter.buildQueryPlan(plan); } - - /// Subordinary tables could have different but convertible types, like numeric types of different width. - /// We must return streams with structure equals to structure of Merge table. - convertingSourceStream(header, storage_snapshot->metadata, aliases, modified_context, *builder, processed_stage); } - return builder; + return plan; } StorageMerge::StorageListWithLocks StorageMerge::getSelectedTables( @@ -1014,10 +1080,47 @@ bool ReadFromMerge::requestReadingInOrder(InputOrderInfoPtr order_info_) if (order_info_->direction != 1 && InterpreterSelectQuery::isQueryWithFinal(query_info)) return false; + auto request_read_in_order = [order_info_](ReadFromMergeTree & read_from_merge_tree) + { + return read_from_merge_tree.requestReadingInOrder( + order_info_->used_prefix_of_sorting_key_size, order_info_->direction, order_info_->limit); + }; + + bool ok = true; + for (const auto & plan : child_plans) + if (plan.isInitialized()) + ok &= recursivelyApplyToReadingSteps(plan.getRootNode(), request_read_in_order); + + if (!ok) + return false; + order_info = order_info_; + query_info.input_order_info = order_info; return true; } +void ReadFromMerge::applyFilters(const QueryPlan & plan) const +{ + auto apply_filters = [this](ReadFromMergeTree & read_from_merge_tree) + { + size_t filters_dags_size = filter_dags.size(); + for (size_t i = 0; i < filters_dags_size; ++i) + read_from_merge_tree.addFilter(filter_dags[i], filter_nodes.nodes[i]); + + read_from_merge_tree.applyFilters(); + return true; + }; + + recursivelyApplyToReadingSteps(plan.getRootNode(), apply_filters); +} + +void ReadFromMerge::applyFilters() +{ + for (const auto & plan : child_plans) + if (plan.isInitialized()) + applyFilters(plan); +} + IStorage::ColumnSizeByName StorageMerge::getColumnSizes() const { ColumnSizeByName column_sizes; diff --git a/src/Storages/StorageMerge.h b/src/Storages/StorageMerge.h index babf0dd92e86..80a5fa335f72 100644 --- a/src/Storages/StorageMerge.h +++ b/src/Storages/StorageMerge.h @@ -1,9 +1,10 @@ #pragma once -#include -#include -#include +#include #include +#include +#include +#include namespace DB @@ -146,6 +147,8 @@ class ReadFromMerge 
final : public SourceStepWithFilter /// Returns `false` if requested reading cannot be performed. bool requestReadingInOrder(InputOrderInfoPtr order_info_); + void applyFilters() override; + private: const size_t required_max_block_size; const size_t requested_num_streams; @@ -177,23 +180,37 @@ class ReadFromMerge final : public SourceStepWithFilter using Aliases = std::vector; - static SelectQueryInfo getModifiedQueryInfo(const SelectQueryInfo & query_info, - const ContextPtr & modified_context, - const StorageWithLockAndName & storage_with_lock_and_name, - const StorageSnapshotPtr & storage_snapshot); + std::vector table_aliases; - QueryPipelineBuilderPtr createSources( + void createChildPlans(); + + void applyFilters(const QueryPlan & plan) const; + + QueryPlan createPlanForTable( const StorageSnapshotPtr & storage_snapshot, SelectQueryInfo & query_info, const QueryProcessingStage::Enum & processed_stage, UInt64 max_block_size, + const StorageWithLockAndName & storage_with_lock, + Names real_column_names, + ContextMutablePtr modified_context, + size_t streams_num); + + QueryPipelineBuilderPtr createSources( + QueryPlan & plan, + const StorageSnapshotPtr & storage_snapshot, + SelectQueryInfo & modified_query_info, + const QueryProcessingStage::Enum & processed_stage, const Block & header, const Aliases & aliases, const StorageWithLockAndName & storage_with_lock, - Names real_column_names, ContextMutablePtr modified_context, - size_t streams_num, - bool concat_streams = false); + bool concat_streams = false) const; + + static SelectQueryInfo getModifiedQueryInfo(const SelectQueryInfo & query_info, + const ContextPtr & modified_context, + const StorageWithLockAndName & storage_with_lock_and_name, + const StorageSnapshotPtr & storage_snapshot); static void convertingSourceStream( const Block & header, diff --git a/tests/ci/ast_fuzzer_check.py b/tests/ci/ast_fuzzer_check.py index 620462991efc..918526f051e4 100644 --- a/tests/ci/ast_fuzzer_check.py +++ b/tests/ci/ast_fuzzer_check.py @@ -32,7 +32,7 @@ from tee_popen import TeePopen from upload_result_helper import upload_results -IMAGE_NAME = "clickhouse/fuzzer" +IMAGE_NAME = "altinityinfra/fuzzer" def get_run_command( @@ -204,7 +204,7 @@ def main(): check_name, ) - ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) logging.info("Result: '%s', '%s', '%s'", status, description, report_url) print(f"::notice ::Report url: {report_url}") diff --git a/tests/ci/build_check.py b/tests/ci/build_check.py index 8036cb5bae7a..2ea3ae75a0ee 100644 --- a/tests/ci/build_check.py +++ b/tests/ci/build_check.py @@ -6,17 +6,20 @@ import logging import sys import time +from shutil import rmtree from ci_config import CI_CONFIG, BuildConfig from ccache_utils import CargoCache from docker_pull_helper import get_image_with_version from env_helper import ( + CACHES_PATH, GITHUB_JOB_API_URL, IMAGES_PATH, REPO_COPY, S3_BUILDS_BUCKET, S3_DOWNLOAD, TEMP_PATH, + CLICKHOUSE_STABLE_VERSION_SUFFIX, ) from git_helper import Git, git_runner from pr_info import PRInfo @@ -35,8 +38,10 @@ get_instance_type, ) from stopwatch import Stopwatch +from ccache_utils import get_ccache_if_not_exists, upload_ccache -IMAGE_NAME = "clickhouse/binary-builder" + +IMAGE_NAME = "altinityinfra/binary-builder" BUILD_LOG_NAME = "build_log.log" @@ -57,6 +62,7 @@ def get_packager_cmd( cargo_cache_dir: Path, build_version: str, image_version: str, + ccache_path: str, official: bool, ) -> 
str: package_type = build_config.package_type @@ -74,7 +80,9 @@ def get_packager_cmd( if build_config.tidy: cmd += " --clang-tidy" - cmd += " --cache=sccache" + # NOTE(vnemkov): we are going to continue to use ccache for now + cmd += " --cache=ccache" + cmd += f" --ccache-dir={ccache_path}" cmd += " --s3-rw-access" cmd += f" --s3-bucket={S3_BUILDS_BUCKET}" cmd += f" --cargo-cache-dir={cargo_cache_dir}" @@ -248,16 +256,18 @@ def main(): logging.info("Got version from repo %s", version.string) - official_flag = pr_info.number == 0 - - version_type = "testing" - if "release" in pr_info.labels or "release-lts" in pr_info.labels: - version_type = "stable" - official_flag = True - - update_version_local(version, version_type) + official_flag = True + # version._flavour = version_type = CLICKHOUSE_STABLE_VERSION_SUFFIX + # TODO (vnemkov): right now we'll use simplified version management: + # only update git hash and explicitly set stable version suffix. + # official_flag = pr_info.number == 0 + # version_type = "testing" + # if "release" in pr_info.labels or "release-lts" in pr_info.labels: + # version_type = CLICKHOUSE_STABLE_VERSION_SUFFIX + # official_flag = True + # update_version_local(version, version_type) - logging.info("Updated local files with version") + logging.info(f"Updated local files with version : {version.string} / {version.describe}") logging.info("Build short name %s", build_name) @@ -268,6 +278,24 @@ def main(): ) cargo_cache.download() + # NOTE(vnemkov): since we still want to use CCACHE over SCCACHE, unlike upstream, + # we need to create local directory for that, just as with 22.8 + ccache_path = Path(CACHES_PATH, build_name + "_ccache") + + logging.info("Will try to fetch cache for our build") + try: + get_ccache_if_not_exists( + ccache_path, s3_helper, pr_info.number, temp_path, pr_info.release_pr + ) + except Exception as e: + # In case there are issues with ccache, remove the path and do not fail a build + logging.info("Failed to get ccache, building without it. 
Error: %s", e) + rmtree(ccache_path, ignore_errors=True) + + if not ccache_path.exists(): + logging.info("cache was not fetched, will create empty dir") + ccache_path.mkdir(parents=True) + packager_cmd = get_packager_cmd( build_config, repo_path / "docker" / "packager", @@ -275,6 +303,7 @@ def main(): cargo_cache.directory, version.string, image_version, + ccache_path, official_flag, ) @@ -291,6 +320,7 @@ def main(): subprocess.check_call( f"sudo chown -R ubuntu:ubuntu {build_output_path}", shell=True ) + subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {ccache_path}", shell=True) logging.info("Build finished as %s, log path %s", build_status, log_path) if build_status == SUCCESS: cargo_cache.upload() @@ -304,6 +334,10 @@ def main(): ) sys.exit(1) + # Upload the ccache first to have the least build time in case of problems + logging.info("Will upload cache") + upload_ccache(ccache_path, s3_helper, pr_info.number, temp_path) + # FIXME performance performance_urls = [] performance_path = build_output_path / "performance.tar.zst" @@ -335,11 +369,26 @@ def main(): log_path, s3_path_prefix + "/" + log_path.name ) logging.info("Log url %s", log_url) + print(f"::notice ::Log URL: {log_url}") else: logging.info("Build log doesn't exist") + print("Build log doesn't exist") print(f"::notice ::Log URL: {log_url}") + src_path = temp_path / "build_source.src.tar.gz" + s3_path = s3_path_prefix + "/clickhouse-" + version.string + ".src.tar.gz" + logging.info("s3_path %s", s3_path) + if src_path.exists(): + src_url = s3_helper.upload_build_file_to_s3( + src_path, s3_path + ) + logging.info("Source tar %s", src_url) + print(f"::notice ::Source tar URL: {src_url}") + else: + logging.info("Source tar doesn't exist") + print("Source tar doesn't exist") + build_result = BuildResult( build_name, log_url, @@ -442,7 +491,7 @@ def main(): log_url, f"Build ({build_name})", ) - ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) # Fail the build job if it didn't succeed if build_status != SUCCESS: diff --git a/tests/ci/ccache_utils.py b/tests/ci/ccache_utils.py index 75a026d2524d..0a04b6f9e3a5 100644 --- a/tests/ci/ccache_utils.py +++ b/tests/ci/ccache_utils.py @@ -4,13 +4,13 @@ import os import shutil from hashlib import md5 +from env_helper import S3_BUILDS_BUCKET, S3_DOWNLOAD from pathlib import Path import requests # type: ignore from build_download_helper import download_build_with_progress, DownloadException from compress_files import decompress_fast, compress_fast -from env_helper import S3_DOWNLOAD, S3_BUILDS_BUCKET from git_helper import git_runner from s3_helper import S3Helper diff --git a/tests/ci/ci_config.py b/tests/ci/ci_config.py index 0f834ac39755..a60932ec9cf1 100644 --- a/tests/ci/ci_config.py +++ b/tests/ci/ci_config.py @@ -229,6 +229,7 @@ def validate(self) -> None: "package_ubsan", "package_tsan", "package_msan", + "package_tsan", "package_debug", "binary_release", ], @@ -315,11 +316,11 @@ def validate(self) -> None: "SQLancer (debug)": TestConfig("package_debug"), "Sqllogic test (release)": TestConfig("package_release"), "SQLTest": TestConfig("package_release"), + "Sign release (actions)": TestConfig("package_release"), }, ) CI_CONFIG.validate() - # checks required by Mergeable Check REQUIRED_CHECKS = [ "ClickHouse build check", diff --git a/tests/ci/clickhouse_helper.py b/tests/ci/clickhouse_helper.py index dac733805394..90a40fbaf737 100644 --- a/tests/ci/clickhouse_helper.py 
+++ b/tests/ci/clickhouse_helper.py @@ -170,19 +170,19 @@ def select_json_each_row(self, db, query, query_params=None): # Obtain the machine type from IMDS: def get_instance_type(): - url = "http://169.254.169.254/latest/meta-data/instance-type" - for i in range(5): - try: - response = requests.get(url, timeout=1) - if response.status_code == 200: - return response.text - except Exception as e: - error = ( - f"Received exception while sending data to {url} on {i} attempt: {e}" - ) - logging.warning(error) - continue - return "" + # url = "http://169.254.169.254/latest/meta-data/instance-type" + # for i in range(5): + # try: + # response = requests.get(url, timeout=1) + # if response.status_code == 200: + # return response.text + # except Exception as e: + # error = ( + # f"Received exception while sending data to {url} on {i} attempt: {e}" + # ) + # logging.warning(error) + # continue + return "Altinity runner" def prepare_tests_results_for_clickhouse( @@ -194,7 +194,7 @@ def prepare_tests_results_for_clickhouse( report_url: str, check_name: str, ) -> List[dict]: - pull_request_url = "https://github.com/ClickHouse/ClickHouse/commits/master" + pull_request_url = "https://github.com/Altinity/ClickHouse/commits/master" base_ref = "master" head_ref = "master" base_repo = pr_info.repo_full_name diff --git a/tests/ci/compatibility_check.py b/tests/ci/compatibility_check.py index 8f6d4917efe6..4ed1ecfb779d 100644 --- a/tests/ci/compatibility_check.py +++ b/tests/ci/compatibility_check.py @@ -25,8 +25,8 @@ from stopwatch import Stopwatch from upload_result_helper import upload_results -IMAGE_UBUNTU = "clickhouse/test-old-ubuntu" -IMAGE_CENTOS = "clickhouse/test-old-centos" +IMAGE_UBUNTU = "altinityinfra/test-old-ubuntu" +IMAGE_CENTOS = "altinityinfra/test-old-centos" DOWNLOAD_RETRIES_COUNT = 5 @@ -251,7 +251,7 @@ def url_filter(url): args.check_name, ) - ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) if state == "failure": sys.exit(1) diff --git a/tests/ci/docker_images_check.py b/tests/ci/docker_images_check.py index 274d0d1d1dfb..abcfd1379b3a 100644 --- a/tests/ci/docker_images_check.py +++ b/tests/ci/docker_images_check.py @@ -91,22 +91,23 @@ def get_changed_docker_images( str(files_changed), ) - changed_images = [] - - for dockerfile_dir, image_description in images_dict.items(): - for f in files_changed: - if f.startswith(dockerfile_dir): - name = image_description["name"] - only_amd64 = image_description.get("only_amd64", False) - logging.info( - "Found changed file '%s' which affects " - "docker image '%s' with path '%s'", - f, - name, - dockerfile_dir, - ) - changed_images.append(DockerImage(dockerfile_dir, name, only_amd64)) - break + # Rebuild all images + changed_images = [DockerImage(dockerfile_dir, image_description["name"], image_description.get("only_amd64", False)) for dockerfile_dir, image_description in images_dict.items()] + + # for dockerfile_dir, image_description in images_dict.items(): + # for f in files_changed: + # if f.startswith(dockerfile_dir): + # name = image_description["name"] + # only_amd64 = image_description.get("only_amd64", False) + # logging.info( + # "Found changed file '%s' which affects " + # "docker image '%s' with path '%s'", + # f, + # name, + # dockerfile_dir, + # ) + # changed_images.append(DockerImage(dockerfile_dir, name, only_amd64)) + # break # The order is important: dependents should go later than bases, so that # they are 
built with updated base versions. @@ -236,6 +237,19 @@ def build_and_push_one_image( f"--tag {image.repo}:{version_string} " f"{cache_from} " f"--cache-to type=inline,mode=max " + # FIXME: many tests utilize packages without specifying version, hence docker pulls :latest + # this will fail multiple jobs are going to be executed on different machines and + # push different images as latest. + # To fix it we may: + # - require jobs to be executed on same machine images were built (no parallelism) + # - change all the test's code (mostly docker-compose files in integration tests) + # that depend on said images and push version somehow into docker-compose. + # (and that is lots of work and many potential conflicts with upstream) + # - tag and push all images as :latest and then just pray that collisions are infrequent. + # and if even if collision happens, image is not that different and would still properly work. + # (^^^ CURRENT SOLUTION ^^^) But this is just a numbers game, it will blow up at some point. + # - do something crazy + f"--tag {image.repo}:latest " f"{push_arg}" f"--progress plain {image.full_path}" ) @@ -244,6 +258,7 @@ def build_and_push_one_image( retcode = proc.wait() if retcode != 0: + logging.error("Building image {} failed with error: {}\n{}".format(image, retcode, ''.join(list(open(build_log, 'rt'))))) return False, build_log logging.info("Processing of %s successfully finished", image.repo) @@ -382,13 +397,25 @@ def main(): changed_json = TEMP_PATH / "changed_images.json" if args.push: + logging.info('Docker info BEFORE logging in: %s, ', subprocess.check_output( # pylint: disable=unexpected-keyword-arg + "docker info", + encoding="utf-8", + shell=True, + )) + + logging.info('Doing docker login') subprocess.check_output( # pylint: disable=unexpected-keyword-arg - "docker login --username 'robotclickhouse' --password-stdin", - input=get_parameter_from_ssm("dockerhub_robot_password"), + "docker login --username 'altinityinfra' --password-stdin", + input=get_parameter_from_ssm("dockerhub-password"), encoding="utf-8", shell=True, ) + logging.info('Docker info: %s, ', subprocess.check_output( # pylint: disable=unexpected-keyword-arg + "docker info", + encoding="utf-8", + shell=True, + )) images_dict = get_images_dict(Path(REPO_COPY), IMAGES_FILE_PATH) pr_info = PRInfo() @@ -467,7 +494,7 @@ def main(): NAME, ) ch_helper = ClickHouseHelper() - ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) if status == "failure": sys.exit(1) diff --git a/tests/ci/docker_manifests_merge.py b/tests/ci/docker_manifests_merge.py index 1be2a1f2e7a7..2692a0da2cb9 100644 --- a/tests/ci/docker_manifests_merge.py +++ b/tests/ci/docker_manifests_merge.py @@ -250,8 +250,8 @@ def main(): args = parse_args() if args.push: subprocess.check_output( # pylint: disable=unexpected-keyword-arg - "docker login --username 'robotclickhouse' --password-stdin", - input=get_parameter_from_ssm("dockerhub_robot_password"), + "docker login --username 'altinityinfra' --password-stdin", + input=get_parameter_from_ssm("dockerhub-password"), encoding="utf-8", shell=True, ) @@ -315,7 +315,7 @@ def main(): NAME, ) ch_helper = ClickHouseHelper() - ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) if __name__ == "__main__": diff --git a/tests/ci/docker_pull_helper.py 
b/tests/ci/docker_pull_helper.py index e1327f505a07..224d3a2a7ae1 100644 --- a/tests/ci/docker_pull_helper.py +++ b/tests/ci/docker_pull_helper.py @@ -5,6 +5,7 @@ import time import subprocess import logging +import traceback from pathlib import Path from typing import List, Optional, Union @@ -52,11 +53,28 @@ def get_images_with_versions( for image_name in required_images: docker_image = DockerImage(image_name, version) if image_name in images: - docker_image.version = images[image_name] + image_version = images[image_name] + # NOTE(vnemkov): For some reason we can get version as list of versions, + # in this case choose one that has commit hash and hence is the longest string. + # E.g. from ['latest-amd64', '0-amd64', '0-473d8f560fc78c6cdaabb960a537ca5ab49f795f-amd64'] + # choose '0-473d8f560fc78c6cdaabb960a537ca5ab49f795f-amd64' since it 100% points to proper commit. + if isinstance(image_version, list): + max_len = 0 + max_len_version = '' + for version_variant in image_version: + if len(version_variant) > max_len: + max_len = len(version_variant) + max_len_version = version_variant + logging.debug(f"selected version {max_len_version} from {image_version}") + image_version = max_len_version + + docker_image.version = image_version + docker_images.append(docker_image) latest_error = Exception("predefined to avoid access before created") if pull: + latest_error = None for docker_image in docker_images: for i in range(10): try: @@ -70,7 +88,8 @@ def get_images_with_versions( except Exception as ex: latest_error = ex time.sleep(i * 3) - logging.info("Got execption pulling docker %s", ex) + logging.info("Got exception pulling docker %s", ex) + latest_error = traceback.format_exc() else: raise Exception( "Cannot pull dockerhub for image docker pull " diff --git a/tests/ci/docker_server.py b/tests/ci/docker_server.py index 55bd2983ea48..d378ac12a4d0 100644 --- a/tests/ci/docker_server.py +++ b/tests/ci/docker_server.py @@ -34,7 +34,11 @@ ) TEMP_PATH = p.join(RUNNER_TEMP, "docker_images_check") -BUCKETS = {"amd64": "package_release", "arm64": "package_aarch64"} +BUCKETS = { + "amd64": "package_release", + # NOTE(vnemkov): arm64 is temporary not supported + "arm64": "package_aarch64", +} git = Git(ignore_no_tags=True) @@ -56,7 +60,7 @@ def parse_args() -> argparse.Namespace: "--version", type=version_arg, default=get_version_from_repo(git=git).string, - help="a version to build, automaticaly got from version_helper, accepts either " + help="a version to build, automatically got from version_helper, accepts either " "tag ('refs/tags/' is removed automatically) or a normal 22.2.2.2 format", ) parser.add_argument( @@ -77,7 +81,7 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "--image-repo", type=str, - default="clickhouse/clickhouse-server", + default="altinityinfra/clickhouse-server", help="image name on docker hub", ) parser.add_argument( @@ -334,11 +338,12 @@ def main(): args.bucket_prefix = ( f"{S3_DOWNLOAD}/{S3_BUILDS_BUCKET}/{release_or_pr}/{pr_info.sha}" ) + tags.append(f"{pr_info.number}-{pr_info.sha}") if args.push: subprocess.check_output( # pylint: disable=unexpected-keyword-arg - "docker login --username 'robotclickhouse' --password-stdin", - input=get_parameter_from_ssm("dockerhub_robot_password"), + "docker login --username 'altinityinfra' --password-stdin", + input=get_parameter_from_ssm("dockerhub-password"), encoding="utf-8", shell=True, ) @@ -385,7 +390,7 @@ def main(): NAME, ) ch_helper = ClickHouseHelper() - ch_helper.insert_events_into(db="default", 
table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) if status != "success": sys.exit(1) diff --git a/tests/ci/docker_test.py b/tests/ci/docker_test.py index 8aab50ed0825..def24a07f836 100644 --- a/tests/ci/docker_test.py +++ b/tests/ci/docker_test.py @@ -38,67 +38,67 @@ def test_get_changed_docker_images(self): self.maxDiff = None expected = sorted( [ - di.DockerImage("docker/test/base", "clickhouse/test-base", False), - di.DockerImage("docker/docs/builder", "clickhouse/docs-builder", True), + di.DockerImage("docker/test/base", "altinityinfra/test-base", False), + di.DockerImage("docker/docs/builder", "altinityinfra/docs-builder", True), di.DockerImage( "docker/test/sqltest", - "clickhouse/sqltest", + "altinityinfra/sqltest", False, - "clickhouse/test-base", # type: ignore + "altinityinfra/test-base", # type: ignore ), di.DockerImage( "docker/test/stateless", - "clickhouse/stateless-test", + "altinityinfra/stateless-test", False, - "clickhouse/test-base", # type: ignore + "altinityinfra/test-base", # type: ignore ), di.DockerImage( "docker/test/integration/base", - "clickhouse/integration-test", + "altinityinfra/integration-test", False, - "clickhouse/test-base", # type: ignore - ), - di.DockerImage( - "docker/test/fuzzer", - "clickhouse/fuzzer", - False, - "clickhouse/test-base", # type: ignore - ), - di.DockerImage( - "docker/test/keeper-jepsen", - "clickhouse/keeper-jepsen-test", - False, - "clickhouse/test-base", # type: ignore - ), - di.DockerImage( - "docker/docs/check", - "clickhouse/docs-check", - False, - "clickhouse/docs-builder", # type: ignore - ), - di.DockerImage( - "docker/docs/release", - "clickhouse/docs-release", - False, - "clickhouse/docs-builder", # type: ignore + "altinityinfra/test-base", # type: ignore ), + # di.DockerImage( + # "docker/test/fuzzer", + # "altinityinfra/fuzzer", + # False, + # "altinityinfra/test-base", # type: ignore + # ), + # di.DockerImage( + # # "docker/test/keeper-jepsen", + # # "altinityinfra/keeper-jepsen-test", + # False, + # "altinityinfra/test-base", # type: ignore + # ), + # di.DockerImage( + # "docker/docs/check", + # "altinityinfra/docs-check", + # False, + # "altinityinfra/docs-builder", # type: ignore + # ), + # di.DockerImage( + # "docker/docs/release", + # "altinityinfra/docs-release", + # False, + # "altinityinfra/docs-builder", # type: ignore + # ), di.DockerImage( "docker/test/stateful", - "clickhouse/stateful-test", + "altinityinfra/stateful-test", False, - "clickhouse/stateless-test", # type: ignore + "altinityinfra/stateless-test", # type: ignore ), di.DockerImage( "docker/test/unit", - "clickhouse/unit-test", + "altinityinfra/unit-test", False, - "clickhouse/stateless-test", # type: ignore + "altinityinfra/stateless-test", # type: ignore ), di.DockerImage( "docker/test/stress", - "clickhouse/stress-test", + "altinityinfra/stress-test", False, - "clickhouse/stateful-test", # type: ignore + "altinityinfra/stateful-test", # type: ignore ), ] ) diff --git a/tests/ci/docs_check.py b/tests/ci/docs_check.py index 650ed93aa710..21bd23033371 100644 --- a/tests/ci/docs_check.py +++ b/tests/ci/docs_check.py @@ -84,7 +84,7 @@ def main(): elif args.force: logging.info("Check the docs because of force flag") - docker_image = get_image_with_version(reports_path, "clickhouse/docs-builder") + docker_image = get_image_with_version(reports_path, "altinityinfra/docs-builder") test_output = temp_path / "docs_check_log" test_output.mkdir(parents=True, exist_ok=True) @@ -150,7 
+150,7 @@ def main(): NAME, ) - ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) if status == "failure": sys.exit(1) diff --git a/tests/ci/env_helper.py b/tests/ci/env_helper.py index 6364ea0ff7c5..08952ed3179d 100644 --- a/tests/ci/env_helper.py +++ b/tests/ci/env_helper.py @@ -17,7 +17,7 @@ CLOUDFLARE_TOKEN = os.getenv("CLOUDFLARE_TOKEN") GITHUB_EVENT_PATH = os.getenv("GITHUB_EVENT_PATH", "") GITHUB_JOB = os.getenv("GITHUB_JOB_OVERRIDDEN", "") or os.getenv("GITHUB_JOB", "local") -GITHUB_REPOSITORY = os.getenv("GITHUB_REPOSITORY", "ClickHouse/ClickHouse") +GITHUB_REPOSITORY = os.getenv("GITHUB_REPOSITORY", "Altinity/ClickHouse") GITHUB_RUN_ID = os.getenv("GITHUB_RUN_ID", "0") GITHUB_SERVER_URL = os.getenv("GITHUB_SERVER_URL", "https://github.com") GITHUB_WORKSPACE = os.getenv("GITHUB_WORKSPACE", git_root) @@ -26,9 +26,11 @@ REPORTS_PATH = os.getenv("REPORTS_PATH", p.abspath(p.join(module_dir, "./reports"))) REPO_COPY = os.getenv("REPO_COPY", GITHUB_WORKSPACE) RUNNER_TEMP = os.getenv("RUNNER_TEMP", p.abspath(p.join(module_dir, "./tmp"))) -S3_BUILDS_BUCKET = os.getenv("S3_BUILDS_BUCKET", "clickhouse-builds") -S3_TEST_REPORTS_BUCKET = os.getenv("S3_TEST_REPORTS_BUCKET", "clickhouse-test-reports") +S3_BUILDS_BUCKET = os.getenv("S3_BUILDS_BUCKET", "altinity-build-artifacts") +S3_TEST_REPORTS_BUCKET = os.getenv("S3_TEST_REPORTS_BUCKET", "altinity-build-artifacts") S3_URL = os.getenv("S3_URL", "https://s3.amazonaws.com") +CLICKHOUSE_STABLE_VERSION_SUFFIX = os.getenv("CLICKHOUSE_STABLE_VERSION_SUFFIX", "stable") + S3_DOWNLOAD = os.getenv("S3_DOWNLOAD", S3_URL) S3_ARTIFACT_DOWNLOAD_TEMPLATE = ( f"{S3_DOWNLOAD}/{S3_BUILDS_BUCKET}/" diff --git a/tests/ci/fast_test_check.py b/tests/ci/fast_test_check.py index 9e734877d6e5..3dcb56f22716 100644 --- a/tests/ci/fast_test_check.py +++ b/tests/ci/fast_test_check.py @@ -114,7 +114,7 @@ def main(): sys.exit(1) sys.exit(0) - docker_image = get_image_with_version(reports_path, "clickhouse/fasttest") + docker_image = get_image_with_version(reports_path, "altinityinfra/fasttest") s3_helper = S3Helper() @@ -213,7 +213,7 @@ def main(): report_url, NAME, ) - ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) # Refuse other checks to run if fast test failed if state != "success": diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index 0736367a62f4..5046c13f81bd 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -68,9 +68,9 @@ def get_additional_envs( def get_image_name(check_name: str) -> str: if "stateless" in check_name.lower(): - return "clickhouse/stateless-test" + return "altinityinfra/stateless-test" if "stateful" in check_name.lower(): - return "clickhouse/stateful-test" + return "altinityinfra/stateful-test" else: raise Exception(f"Cannot deduce image name based on check name {check_name}") @@ -119,7 +119,8 @@ def get_run_command( return ( f"docker run --volume={builds_path}:/package_folder " - f"{ci_logs_args}" + f"{ci_logs_args} " + f"--dns=8.8.8.8 " f"--volume={repo_path}/tests:/usr/share/clickhouse-test " f"{volume_with_broken_test}" f"--volume={result_path}:/test_output " @@ -271,10 +272,12 @@ def main(): run_by_hash_total = 0 check_name_with_group = check_name - rerun_helper = RerunHelper(commit, check_name_with_group) - if 
rerun_helper.is_already_finished_by_status(): - logging.info("Check is already finished according to github status, exiting") - sys.exit(0) + # Always re-run, even if it finished in previous run. + # gh = Github(get_best_robot_token()) + # rerun_helper = RerunHelper(gh, pr_info, check_name_with_group) + # if rerun_helper.is_already_finished_by_status(): + # logging.info("Check is already finished according to github status, exiting") + # sys.exit(0) tests_to_run = [] if run_changed_tests: @@ -401,7 +404,7 @@ def main(): report_url, check_name_with_group, ) - ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) if state != "success": if FORCE_TESTS_LABEL in pr_info.labels: diff --git a/tests/ci/get_robot_token.py b/tests/ci/get_robot_token.py index 530f894a36a0..b93eea52f700 100644 --- a/tests/ci/get_robot_token.py +++ b/tests/ci/get_robot_token.py @@ -53,8 +53,20 @@ def get_parameters_from_ssm( ROBOT_TOKEN = None # type: Optional[Token] +# NOTE(Arthur Passos): Original CI code uses the "_original" version of this method. Each robot token is rate limited +# and the original implementation selects the "best one". To make it simpler and iterate faster, +# we are using only one robot and keeping the method signature. In the future we might reconsider +# having multiple robot tokens +def get_best_robot_token(token_prefix_env_name="github_robot_token"): + # Re-use already fetched token (same as in get_best_robot_token_original) + # except here we assume it is always a string (since we use only one token and don't do token rotation) + global ROBOT_TOKEN + if ROBOT_TOKEN is not None: + return ROBOT_TOKEN + ROBOT_TOKEN = get_parameter_from_ssm(token_prefix_env_name) + return ROBOT_TOKEN -def get_best_robot_token(tokens_path: str = "/github-tokens") -> str: +def get_best_robot_token_original(token_prefix_env_name: str="github_robot_token_") -> str: global ROBOT_TOKEN if ROBOT_TOKEN is not None: return ROBOT_TOKEN.value diff --git a/tests/ci/git_helper.py b/tests/ci/git_helper.py index 9927d5a42489..35e825143a65 100644 --- a/tests/ci/git_helper.py +++ b/tests/ci/git_helper.py @@ -10,9 +10,11 @@ # ^ and $ match subline in `multiple\nlines` # \A and \Z match only start and end of the whole string +# NOTE (vnemkov): support both upstream tag style: v22.x.y.z-lts and Altinity tag style: v22.x.y.z.altinitystable +# Because at early release stages there could be no Altinity tag set on commit, only upstream one. 
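
[Editor's note] As a quick illustration of the widened tag pattern (my example, not from the patch; the version numbers are invented), both tag styles mentioned in the note above now match, while a bare tag without a release-type suffix still does not. The pattern string is copied verbatim from the patched TAG_REGEXP that follows in the diff.

import re

TAG_REGEXP = (
    r"\Av\d{2}[.][1-9]\d*[.][1-9]\d*[.][1-9]\d*[-\.](testing|prestable|stable|lts|altinitystable)\Z"
)

for tag in ("v22.8.15.25-lts",             # upstream style: dash before the suffix
            "v22.8.15.25.altinitystable",  # Altinity style: dot before the suffix
            "v22.8.15.25"):                # bare tag: rejected, no suffix at all
    print(tag, bool(re.match(TAG_REGEXP, tag)))
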
RELEASE_BRANCH_REGEXP = r"\A\d+[.]\d+\Z" TAG_REGEXP = ( - r"\Av\d{2}[.][1-9]\d*[.][1-9]\d*[.][1-9]\d*-(testing|prestable|stable|lts)\Z" + r"\Av\d{2}[.][1-9]\d*[.][1-9]\d*[.][1-9]\d*[-\.](testing|prestable|stable|lts|altinitystable)\Z" ) SHA_REGEXP = re.compile(r"\A([0-9]|[a-f]){40}\Z") diff --git a/tests/ci/git_test.py b/tests/ci/git_test.py index 3aedd8a8dea1..0c28c8d38421 100644 --- a/tests/ci/git_test.py +++ b/tests/ci/git_test.py @@ -70,6 +70,9 @@ def test_tags(self): with self.assertRaises(Exception): setattr(self.git, tag_attr, tag) + def check_tag(self): + self.git.check_tag("v21.12.333.4567-altinitystable") + def test_tweak(self): self.git.commits_since_tag = 0 self.assertEqual(self.git.tweak, 1) @@ -79,3 +82,6 @@ def test_tweak(self): self.assertEqual(self.git.tweak, 22224) self.git.commits_since_tag = 0 self.assertEqual(self.git.tweak, 22222) + +if __name__ == '__main__': + unittest.main() diff --git a/tests/ci/install_check.py b/tests/ci/install_check.py index 9971d0c236c6..fa045df7d73a 100644 --- a/tests/ci/install_check.py +++ b/tests/ci/install_check.py @@ -36,8 +36,8 @@ from upload_result_helper import upload_results -RPM_IMAGE = "clickhouse/install-rpm-test" -DEB_IMAGE = "clickhouse/install-deb-test" +RPM_IMAGE = "altinityinfra/install-rpm-test" +DEB_IMAGE = "altinityinfra/install-deb-test" TEMP_PATH = Path(TEMP) LOGS_PATH = TEMP_PATH / "tests_logs" @@ -372,7 +372,7 @@ def filter_artifacts(path: str) -> bool: args.check_name, ) - ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) if state == FAILURE: sys.exit(1) diff --git a/tests/ci/integration_test_check.py b/tests/ci/integration_test_check.py index 9ac339ac17db..2b77ddaa8ff4 100644 --- a/tests/ci/integration_test_check.py +++ b/tests/ci/integration_test_check.py @@ -39,18 +39,18 @@ # When update, update # tests/integration/ci-runner.py:ClickhouseIntegrationTestsRunner.get_images_names too IMAGES = [ - "clickhouse/dotnet-client", - "clickhouse/integration-helper", - "clickhouse/integration-test", - "clickhouse/integration-tests-runner", - "clickhouse/kerberized-hadoop", - "clickhouse/kerberos-kdc", - "clickhouse/mysql-golang-client", - "clickhouse/mysql-java-client", - "clickhouse/mysql-js-client", - "clickhouse/mysql-php-client", - "clickhouse/nginx-dav", - "clickhouse/postgresql-java-client", + "altinityinfra/dotnet-client", + "altinityinfra/integration-helper", + "altinityinfra/integration-test", + "altinityinfra/integration-tests-runner", + "altinityinfra/kerberized-hadoop", + "altinityinfra/kerberos-kdc", + "altinityinfra/mysql-golang-client", + "altinityinfra/mysql-java-client", + "altinityinfra/mysql-js-client", + "altinityinfra/mysql-php-client", + "altinityinfra/nginx-dav", + "altinityinfra/postgresql-java-client", ] @@ -60,6 +60,7 @@ def get_json_params_dict( docker_images: List[DockerImage], run_by_hash_total: int, run_by_hash_num: int, + dockerd_volume_dir: Path ) -> dict: return { "context_name": check_name, @@ -72,6 +73,7 @@ def get_json_params_dict( "disable_net_host": True, "run_by_hash_total": run_by_hash_total, "run_by_hash_num": run_by_hash_num, + "dockerd_volume_dir": dockerd_volume_dir.as_posix(), } @@ -210,10 +212,12 @@ def main(): gh = Github(get_best_robot_token(), per_page=100) commit = get_commit(gh, pr_info.sha) - rerun_helper = RerunHelper(commit, check_name_with_group) - if rerun_helper.is_already_finished_by_status(): - logging.info("Check is already finished according to 
github status, exiting") - sys.exit(0) + # Always re-run, even if it finished in previous run. + # gh = Github(get_best_robot_token()) + # rerun_helper = RerunHelper(gh, pr_info, check_name_with_group) + # if rerun_helper.is_already_finished_by_status(): + # logging.info("Check is already finished according to github status, exiting") + # sys.exit(0) images = get_images_with_versions(reports_path, IMAGES) result_path = temp_path / "output_dir" @@ -225,6 +229,9 @@ def main(): build_path = temp_path / "build" build_path.mkdir(parents=True, exist_ok=True) + dockerd_volume_dir = temp_path / "dockerd_volume_dir" + dockerd_volume_dir.mkdir(parents=True, exist_ok=True) + if validate_bugfix_check: download_last_release(build_path) else: @@ -243,6 +250,7 @@ def main(): images, run_by_hash_total, run_by_hash_num, + dockerd_volume_dir, ) ) json_params.write(params_text) @@ -311,7 +319,7 @@ def main(): check_name_with_group, ) - ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) if state == "failure": sys.exit(1) diff --git a/tests/ci/jepsen_check.py b/tests/ci/jepsen_check.py index 94ec8f937900..ae48cf960ded 100644 --- a/tests/ci/jepsen_check.py +++ b/tests/ci/jepsen_check.py @@ -34,10 +34,10 @@ KEEPER_DESIRED_INSTANCE_COUNT = 3 SERVER_DESIRED_INSTANCE_COUNT = 4 -KEEPER_IMAGE_NAME = "clickhouse/keeper-jepsen-test" +KEEPER_IMAGE_NAME = "altinityinfra/keeper-jepsen-test" KEEPER_CHECK_NAME = "ClickHouse Keeper Jepsen" -SERVER_IMAGE_NAME = "clickhouse/server-jepsen-test" +SERVER_IMAGE_NAME = "altinityinfra/server-jepsen-test" SERVER_CHECK_NAME = "ClickHouse Server Jepsen" @@ -304,7 +304,7 @@ def main(): report_url, check_name, ) - ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) clear_autoscaling_group() diff --git a/tests/ci/performance_comparison_check.py b/tests/ci/performance_comparison_check.py index 2e4989c66cfc..fd523fa0318f 100644 --- a/tests/ci/performance_comparison_check.py +++ b/tests/ci/performance_comparison_check.py @@ -31,7 +31,7 @@ from clickhouse_helper import get_instance_type from stopwatch import Stopwatch -IMAGE_NAME = "clickhouse/performance-comparison" +IMAGE_NAME = "altinityinfra/performance-comparison" def get_run_command( diff --git a/tests/ci/sign_release.py b/tests/ci/sign_release.py new file mode 100644 index 000000000000..ddec398ef55d --- /dev/null +++ b/tests/ci/sign_release.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python3 +import sys +import os +import logging +from env_helper import TEMP_PATH, REPO_COPY, REPORTS_PATH +from s3_helper import S3Helper +from pr_info import PRInfo +from build_download_helper import download_builds_filter +import hashlib +from pathlib import Path + +GPG_BINARY_SIGNING_KEY = os.getenv("GPG_BINARY_SIGNING_KEY") +GPG_BINARY_SIGNING_PASSPHRASE = os.getenv("GPG_BINARY_SIGNING_PASSPHRASE") + +CHECK_NAME = "Sign release (actions)" + +def hash_file(file_path): + BLOCK_SIZE = 65536 # The size of each read from the file + + file_hash = hashlib.sha256() # Create the hash object, can use something other than `.sha256()` if you wish + with open(file_path, 'rb') as f: # Open the file to read it's bytes + fb = f.read(BLOCK_SIZE) # Read from the file. 
Take in the amount declared above + while len(fb) > 0: # While there is still data being read from the file + file_hash.update(fb) # Update the hash + fb = f.read(BLOCK_SIZE) # Read the next block from the file + + hash_file_path = file_path + '.sha256' + with open(hash_file_path, 'x') as f: + digest = file_hash.hexdigest() + f.write(digest) + print(f'Hashed {file_path}: {digest}') + + return hash_file_path + +def sign_file(file_path): + priv_key_file_path = 'priv.key' + with open(priv_key_file_path, 'x') as f: + f.write(GPG_BINARY_SIGNING_KEY) + + out_file_path = f'{file_path}.gpg' + + os.system(f'echo {GPG_BINARY_SIGNING_PASSPHRASE} | gpg --batch --import {priv_key_file_path}') + os.system(f'gpg -o {out_file_path} --pinentry-mode=loopback --batch --yes --passphrase {GPG_BINARY_SIGNING_PASSPHRASE} --sign {file_path}') + print(f"Signed {file_path}") + os.remove(priv_key_file_path) + + return out_file_path + +def main(): + reports_path = REPORTS_PATH + + if not os.path.exists(TEMP_PATH): + os.makedirs(TEMP_PATH) + + pr_info = PRInfo() + + logging.info("Repo copy path %s", REPO_COPY) + + s3_helper = S3Helper() + + s3_path_prefix = f"{pr_info.number}/{pr_info.sha}/" + CHECK_NAME.lower().replace( + " ", "_" + ).replace("(", "_").replace(")", "_").replace(",", "_") + + # downloads `package_release` artifacts generated + download_builds_filter(CHECK_NAME, reports_path, TEMP_PATH) + + for f in os.listdir(TEMP_PATH): + full_path = os.path.join(TEMP_PATH, f) + hashed_file_path = hash_file(full_path) + signed_file_path = Path(sign_file(hashed_file_path)) + s3_path = f'{s3_path_prefix}/{os.path.basename(signed_file_path)}' + s3_helper.upload_build_file_to_s3(signed_file_path, s3_path) + print(f'Uploaded file {signed_file_path} to {s3_path}') + + # Signed hashes are: + # clickhouse-client_22.3.15.2.altinitystable_amd64.deb.sha512.gpg clickhouse-keeper_22.3.15.2.altinitystable_x86_64.apk.sha512.gpg + # clickhouse-client-22.3.15.2.altinitystable-amd64.tgz.sha512.gpg clickhouse-keeper-22.3.15.2.altinitystable.x86_64.rpm.sha512.gpg + # clickhouse-client_22.3.15.2.altinitystable_x86_64.apk.sha512.gpg clickhouse-keeper-dbg_22.3.15.2.altinitystable_amd64.deb.sha512.gpg + # clickhouse-client-22.3.15.2.altinitystable.x86_64.rpm.sha512.gpg clickhouse-keeper-dbg-22.3.15.2.altinitystable-amd64.tgz.sha512.gpg + # clickhouse-common-static_22.3.15.2.altinitystable_amd64.deb.sha512.gpg clickhouse-keeper-dbg_22.3.15.2.altinitystable_x86_64.apk.sha512.gpg + # clickhouse-common-static-22.3.15.2.altinitystable-amd64.tgz.sha512.gpg clickhouse-keeper-dbg-22.3.15.2.altinitystable.x86_64.rpm.sha512.gpg + # clickhouse-common-static_22.3.15.2.altinitystable_x86_64.apk.sha512.gpg clickhouse-keeper.sha512.gpg + # clickhouse-common-static-22.3.15.2.altinitystable.x86_64.rpm.sha512.gpg clickhouse-library-bridge.sha512.gpg + # clickhouse-common-static-dbg_22.3.15.2.altinitystable_amd64.deb.sha512.gpg clickhouse-odbc-bridge.sha512.gpg + # clickhouse-common-static-dbg-22.3.15.2.altinitystable-amd64.tgz.sha512.gpg clickhouse-server_22.3.15.2.altinitystable_amd64.deb.sha512.gpg + # clickhouse-common-static-dbg_22.3.15.2.altinitystable_x86_64.apk.sha512.gpg clickhouse-server-22.3.15.2.altinitystable-amd64.tgz.sha512.gpg + # clickhouse-common-static-dbg-22.3.15.2.altinitystable.x86_64.rpm.sha512.gpg clickhouse-server_22.3.15.2.altinitystable_x86_64.apk.sha512.gpg + # clickhouse-keeper_22.3.15.2.altinitystable_amd64.deb.sha512.gpg clickhouse-server-22.3.15.2.altinitystable.x86_64.rpm.sha512.gpg + # 
clickhouse-keeper-22.3.15.2.altinitystable-amd64.tgz.sha512.gpg clickhouse.sha512.gpg + + sys.exit(0) + +if __name__ == "__main__": + main() diff --git a/tests/ci/sqlancer_check.py b/tests/ci/sqlancer_check.py index 47bc3b2c1e8c..ebe46bc9502c 100644 --- a/tests/ci/sqlancer_check.py +++ b/tests/ci/sqlancer_check.py @@ -29,7 +29,7 @@ from tee_popen import TeePopen from upload_result_helper import upload_results -IMAGE_NAME = "clickhouse/sqlancer-test" +IMAGE_NAME = "altinityinfra/sqlancer-test" def get_run_command(download_url: str, workspace_path: Path, image: DockerImage) -> str: @@ -160,7 +160,7 @@ def main(): report_url, check_name, ) - ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) if __name__ == "__main__": diff --git a/tests/ci/sqllogic_test.py b/tests/ci/sqllogic_test.py index 7650a4afa40a..55c2134d5b48 100755 --- a/tests/ci/sqllogic_test.py +++ b/tests/ci/sqllogic_test.py @@ -30,7 +30,7 @@ NO_CHANGES_MSG = "Nothing to run" -IMAGE_NAME = "clickhouse/sqllogic-test" +IMAGE_NAME = "altinityinfra/sqllogic-test" def get_run_command( diff --git a/tests/ci/sqltest.py b/tests/ci/sqltest.py index a4eb1b23349d..10b13ca8ffe9 100644 --- a/tests/ci/sqltest.py +++ b/tests/ci/sqltest.py @@ -28,7 +28,7 @@ from s3_helper import S3Helper from stopwatch import Stopwatch -IMAGE_NAME = "clickhouse/sqltest" +IMAGE_NAME = "altinityinfra/sqltest" def get_run_command(pr_number, sha, download_url, workspace_path, image): @@ -146,7 +146,7 @@ def main(): check_name, ) - ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) logging.info("Result: '%s', '%s', '%s'", status, description, report_url) print(f"::notice ::Report url: {report_url}") diff --git a/tests/ci/stress_check.py b/tests/ci/stress_check.py index afc5c3d74980..012bd1bcf88c 100644 --- a/tests/ci/stress_check.py +++ b/tests/ci/stress_check.py @@ -190,11 +190,11 @@ def run_stress_test(docker_image_name: str) -> None: report_url, check_name, ) - ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) if state == "failure": sys.exit(1) if __name__ == "__main__": - run_stress_test("clickhouse/stress-test") + run_stress_test("altinityinfra/stress-test") diff --git a/tests/ci/style_check.py b/tests/ci/style_check.py index a006e01ff6bb..d83f12dc56b6 100644 --- a/tests/ci/style_check.py +++ b/tests/ci/style_check.py @@ -163,7 +163,7 @@ def main(): code = int(state != "success") sys.exit(code) - docker_image = get_image_with_version(reports_path, "clickhouse/style-test") + docker_image = get_image_with_version(reports_path, "altinityinfra/style-test") s3_helper = S3Helper() cmd = ( @@ -199,7 +199,7 @@ def main(): report_url, NAME, ) - ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) if state in ["error", "failure"]: sys.exit(1) diff --git a/tests/ci/tests/docker_images_for_tests.json b/tests/ci/tests/docker_images_for_tests.json index 70db87605616..008c60ba6206 100644 --- a/tests/ci/tests/docker_images_for_tests.json +++ b/tests/ci/tests/docker_images_for_tests.json @@ -1,162 +1,124 @@ { "docker/packager/deb": { - "name": "clickhouse/deb-builder", + "name": "altinityinfra/deb-builder", "dependent": [] 
}, "docker/packager/binary": { - "name": "clickhouse/binary-builder", - "dependent": [ - "docker/test/codebrowser" - ] + "name": "altinityinfra/binary-builder", + "dependent": [] }, "docker/test/compatibility/centos": { - "name": "clickhouse/test-old-centos", + "name": "altinityinfra/test-old-centos", "dependent": [] }, "docker/test/compatibility/ubuntu": { - "name": "clickhouse/test-old-ubuntu", + "name": "altinityinfra/test-old-ubuntu", "dependent": [] }, "docker/test/integration/base": { - "name": "clickhouse/integration-test", - "dependent": [] - }, - "docker/test/fuzzer": { - "name": "clickhouse/fuzzer", - "dependent": [] - }, - "docker/test/performance-comparison": { - "name": "clickhouse/performance-comparison", + "name": "altinityinfra/integration-test", "dependent": [] }, "docker/test/util": { - "name": "clickhouse/test-util", + "name": "altinityinfra/test-util", "dependent": [ "docker/test/base", "docker/test/fasttest" ] }, "docker/test/stateless": { - "name": "clickhouse/stateless-test", + "name": "altinityinfra/stateless-test", "dependent": [ "docker/test/stateful", "docker/test/unit" ] }, "docker/test/stateful": { - "name": "clickhouse/stateful-test", + "name": "altinityinfra/stateful-test", "dependent": [ "docker/test/stress" ] }, "docker/test/unit": { - "name": "clickhouse/unit-test", - "dependent": [] - }, - "docker/test/stress": { - "name": "clickhouse/stress-test", - "dependent": [] - }, - "docker/test/codebrowser": { - "name": "clickhouse/codebrowser", + "name": "altinityinfra/unit-test", "dependent": [] }, "docker/test/integration/runner": { - "name": "clickhouse/integration-tests-runner", + "name": "altinityinfra/integration-tests-runner", "dependent": [] }, "docker/test/fasttest": { - "name": "clickhouse/fasttest", - "dependent": [] - }, - "docker/test/style": { - "name": "clickhouse/style-test", + "name": "altinityinfra/fasttest", "dependent": [] }, "docker/test/integration/s3_proxy": { - "name": "clickhouse/s3-proxy", + "name": "altinityinfra/s3-proxy", "dependent": [] }, "docker/test/integration/resolver": { - "name": "clickhouse/python-bottle", + "name": "altinityinfra/python-bottle", "dependent": [] }, "docker/test/integration/helper_container": { - "name": "clickhouse/integration-helper", + "name": "altinityinfra/integration-helper", "dependent": [] }, "docker/test/integration/mysql_golang_client": { - "name": "clickhouse/mysql-golang-client", + "name": "altinityinfra/mysql-golang-client", "dependent": [] }, "docker/test/integration/dotnet_client": { - "name": "clickhouse/dotnet-client", + "name": "altinityinfra/dotnet-client", "dependent": [] }, "docker/test/integration/mysql_java_client": { - "name": "clickhouse/mysql-java-client", + "name": "altinityinfra/mysql-java-client", "dependent": [] }, "docker/test/integration/mysql_js_client": { - "name": "clickhouse/mysql-js-client", + "name": "altinityinfra/mysql-js-client", "dependent": [] }, "docker/test/integration/mysql_php_client": { - "name": "clickhouse/mysql-php-client", + "name": "altinityinfra/mysql-php-client", "dependent": [] }, "docker/test/integration/postgresql_java_client": { - "name": "clickhouse/postgresql-java-client", + "name": "altinityinfra/postgresql-java-client", "dependent": [] }, "docker/test/integration/kerberos_kdc": { - "name": "clickhouse/kerberos-kdc", + "name": "altinityinfra/kerberos-kdc", "dependent": [] }, "docker/test/base": { - "name": "clickhouse/test-base", - "dependent": [ + "name": "altinityinfra/test-base", + "dependent": [ "docker/test/stateless", 
"docker/test/integration/base", "docker/test/fuzzer", "docker/test/keeper-jepsen", "docker/test/sqltest" - ] + ] }, "docker/test/integration/kerberized_hadoop": { - "name": "clickhouse/kerberized-hadoop", + "name": "altinityinfra/kerberized-hadoop", "dependent": [] }, "docker/test/sqlancer": { - "name": "clickhouse/sqlancer-test", + "name": "altinityinfra/sqlancer-test", "dependent": [] }, "docker/test/keeper-jepsen": { - "name": "clickhouse/keeper-jepsen-test", - "dependent": [] - }, - "docker/docs/builder": { - "name": "clickhouse/docs-builder", - "only_amd64": true, - "dependent": [ - "docker/docs/check", - "docker/docs/release" - ] - }, - "docker/docs/check": { - "name": "clickhouse/docs-check", - "dependent": [] - }, - "docker/docs/release": { - "name": "clickhouse/docs-release", + "name": "altinityinfra/keeper-jepsen-test", "dependent": [] }, "docker/test/sqllogic": { - "name": "clickhouse/sqllogic-test", + "name": "altinityinfra/sqllogic-test", "dependent": [] }, "docker/test/sqltest": { - "name": "clickhouse/sqltest", + "name": "altinityinfra/sqltest", "dependent": [] } } diff --git a/tests/ci/unit_tests_check.py b/tests/ci/unit_tests_check.py index 6384b0ff432e..6f6aa3c3a9b2 100644 --- a/tests/ci/unit_tests_check.py +++ b/tests/ci/unit_tests_check.py @@ -32,7 +32,7 @@ from upload_result_helper import upload_results -IMAGE_NAME = "clickhouse/unit-test" +IMAGE_NAME = "altinityinfra/unit-test" def get_test_name(line): @@ -183,7 +183,7 @@ def main(): check_name, ) - ch_helper.insert_events_into(db="default", table="checks", events=prepared_events) + ch_helper.insert_events_into(db="gh-data", table="checks", events=prepared_events) if state == "failure": sys.exit(1) diff --git a/tests/ci/upgrade_check.py b/tests/ci/upgrade_check.py index 83b6f9e299fd..f84451cad81d 100644 --- a/tests/ci/upgrade_check.py +++ b/tests/ci/upgrade_check.py @@ -1,4 +1,4 @@ import stress_check if __name__ == "__main__": - stress_check.run_stress_test("clickhouse/upgrade-check") + stress_check.run_stress_test("altinityinfra/upgrade-check") diff --git a/tests/ci/version_helper.py b/tests/ci/version_helper.py index fb046e989a91..4b75974500c8 100755 --- a/tests/ci/version_helper.py +++ b/tests/ci/version_helper.py @@ -46,6 +46,7 @@ def __init__( revision: Union[int, str], git: Optional[Git], tweak: Optional[str] = None, + flavour: Optional[str] = None, ): self._major = int(major) self._minor = int(minor) @@ -59,6 +60,7 @@ def __init__( self._tweak = self._git.tweak self._describe = "" self._description = "" + self._flavour = flavour def update(self, part: Literal["major", "minor", "patch"]) -> "ClickHouseVersion": """If part is valid, returns a new version""" @@ -132,9 +134,12 @@ def description(self) -> str: @property def string(self): - return ".".join( + version_as_string = ".".join( (str(self.major), str(self.minor), str(self.patch), str(self.tweak)) ) + if self._flavour: + version_as_string = f"{version_as_string}.{self._flavour}" + return version_as_string def as_dict(self) -> VERSIONS: return { @@ -155,7 +160,10 @@ def with_description(self, version_type): if version_type not in VersionType.VALID: raise ValueError(f"version type {version_type} not in {VersionType.VALID}") self._description = version_type - self._describe = f"v{self.string}-{version_type}" + if version_type == self._flavour: + self._describe = f"v{self.string}" + else: + self._describe = f"v{self.string}-{version_type}" def __eq__(self, other: Any) -> bool: if not isinstance(self, type(other)): @@ -183,16 +191,17 @@ def __le__(self, other: 
"ClickHouseVersion") -> bool: class VersionType: LTS = "lts" PRESTABLE = "prestable" - STABLE = "stable" + STABLE = "altinitystable" TESTING = "testing" VALID = (TESTING, PRESTABLE, STABLE, LTS) def validate_version(version: str) -> None: + # NOTE(vnemkov): minor but imporant fixes, so versions with 'flavour' are threated as valid (e.g. 22.8.8.4.altinitystable) parts = version.split(".") - if len(parts) != 4: + if len(parts) < 4: raise ValueError(f"{version} does not contain 4 parts") - for part in parts: + for part in parts[:4]: int(part) @@ -232,6 +241,9 @@ def get_version_from_repo( versions["patch"], versions["revision"], git, + # Explicitly use tweak value from version file + tweak=versions.get("tweak", versions["revision"]), + flavour=versions.get("flavour", None) ) @@ -239,8 +251,17 @@ def get_version_from_string( version: str, git: Optional[Git] = None ) -> ClickHouseVersion: validate_version(version) - parts = version.split(".") - return ClickHouseVersion(parts[0], parts[1], parts[2], -1, git, parts[3]) + # dict for simple handling of missing parts with parts.get(index, default) + parts = dict(enumerate(version.split("."))) + return ClickHouseVersion( + parts[0], + parts[1], + parts[2], + -1, + git, + parts.get(3, None), + parts.get(4, None) + ) def get_version_from_tag(tag: str) -> ClickHouseVersion: @@ -314,7 +335,7 @@ def update_contributors( cfd.write(content) -def update_version_local(version, version_type="testing"): +def update_version_local(version : ClickHouseVersion, version_type="testing"): update_contributors() version.with_description(version_type) update_cmake_version(version) diff --git a/tests/integration/ci-runner.py b/tests/integration/ci-runner.py index aa89ccf11b36..06746fc919d4 100755 --- a/tests/integration/ci-runner.py +++ b/tests/integration/ci-runner.py @@ -15,8 +15,8 @@ import zlib # for crc32 -MAX_RETRY = 1 -NUM_WORKERS = 5 +MAX_RETRY = 3 +NUM_WORKERS = 10 SLEEP_BETWEEN_RETRIES = 5 PARALLEL_GROUP_SIZE = 100 CLICKHOUSE_BINARY_PATH = "usr/bin/clickhouse" @@ -255,6 +255,7 @@ def __init__(self, result_path, params): ) # if use_tmpfs is not set we assume it to be true, otherwise check self.use_tmpfs = "use_tmpfs" not in self.params or self.params["use_tmpfs"] + self.dockerd_volume_dir = self.params.get("dockerd_volume_dir", None) self.disable_net_host = ( "disable_net_host" in self.params and self.params["disable_net_host"] ) @@ -303,18 +304,18 @@ def shuffle_test_groups(self): @staticmethod def get_images_names(): return [ - "clickhouse/dotnet-client", - "clickhouse/integration-helper", - "clickhouse/integration-test", - "clickhouse/integration-tests-runner", - "clickhouse/kerberized-hadoop", - "clickhouse/kerberos-kdc", - "clickhouse/mysql-golang-client", - "clickhouse/mysql-java-client", - "clickhouse/mysql-js-client", - "clickhouse/mysql-php-client", - "clickhouse/nginx-dav", - "clickhouse/postgresql-java-client", + "altinityinfra/dotnet-client", + "altinityinfra/integration-helper", + "altinityinfra/integration-test", + "altinityinfra/integration-tests-runner", + "altinityinfra/kerberized-hadoop", + "altinityinfra/kerberos-kdc", + "altinityinfra/mysql-golang-client", + "altinityinfra/mysql-java-client", + "altinityinfra/mysql-js-client", + "altinityinfra/mysql-php-client", + "altinityinfra/nginx-dav", + "altinityinfra/postgresql-java-client", ] def _pre_pull_images(self, repo_path): @@ -322,7 +323,7 @@ def _pre_pull_images(self, repo_path): cmd = ( "cd {repo_path}/tests/integration && " - "timeout --signal=KILL 1h ./runner {runner_opts} {image_cmd} 
--pre-pull --command '{command}' ".format( + "timeout --signal=KILL 2h ./runner {runner_opts} {image_cmd} --pre-pull --command '{command}' ".format( repo_path=repo_path, runner_opts=self._get_runner_opts(), image_cmd=image_cmd, @@ -419,8 +420,11 @@ def _compress_logs(self, dir, relpaths, result_path): def _get_runner_opts(self): result = [] - if self.use_tmpfs: + if self.dockerd_volume_dir: + result.append(f"--dockerd-volume-dir={self.dockerd_volume_dir}") + elif self.use_tmpfs: result.append("--tmpfs") + if self.disable_net_host: result.append("--disable-net-host") if self.use_analyzer: @@ -546,7 +550,7 @@ def _get_runner_image_cmd(self, repo_path): "--docker-image-version", ): for img in self.get_images_names(): - if img == "clickhouse/integration-tests-runner": + if img == "altinityinfra/integration-tests-runner": runner_version = self.get_image_version(img) logging.info( "Can run with custom docker image version %s", runner_version @@ -671,8 +675,13 @@ def run_test_group( test_cmd = " ".join([shlex.quote(test) for test in sorted(test_names)]) parallel_cmd = ( - " --parallel {} ".format(num_workers) if num_workers > 0 else "" + " --parallel {} ".format(num_workers) if (num_workers > 0 or i > 0) else "" ) + # For each re-run reduce number of workers, + # to improve chances of tests passing. + if num_workers and num_workers > 0: + num_workers = max(1, num_workers // 2) + # -r -- show extra test summary: # -f -- (f)ailed # -E -- (E)rror @@ -877,6 +886,10 @@ def run_impl(self, repo_path, build_path): logging.info("Pulling images") runner._pre_pull_images(repo_path) + if self.dockerd_volume_dir: + logging.info("Cached pre-pulled docker images into %s:\n%s", + self.dockerd_volume_dir, + subprocess.check_output(f"du -hs {shlex.quote(self.dockerd_volume_dir)}", shell=True)) logging.info( "Dump iptables before run %s", diff --git a/tests/integration/helpers/cluster.py b/tests/integration/helpers/cluster.py index b9e7e4f5c6d0..4282f1895ac2 100644 --- a/tests/integration/helpers/cluster.py +++ b/tests/integration/helpers/cluster.py @@ -39,6 +39,7 @@ except Exception as e: logging.warning(f"Cannot import some modules, some tests may not work: {e}") + from dict2xml import dict2xml from kazoo.client import KazooClient from kazoo.exceptions import KazooException @@ -718,6 +719,12 @@ def redis_port(self): return self._redis_port def print_all_docker_pieces(self): + logging.debug("!!! 
Docker info: %s", subprocess.check_output( + "docker info", + shell=True, + universal_newlines=True, + )) + res_networks = subprocess.check_output( f"docker network ls --filter name='{self.project_name}*'", shell=True, @@ -946,7 +953,7 @@ def setup_keeper_cmd(self, instance, env_variables, docker_compose_yml_dir): env_variables["keeper_binary"] = binary_path env_variables["keeper_cmd_prefix"] = keeper_cmd_prefix - env_variables["image"] = "clickhouse/integration-test:" + self.docker_base_tag + env_variables["image"] = "altinityinfra/integration-test:" + self.docker_base_tag env_variables["user"] = str(os.getuid()) env_variables["keeper_fs"] = "bind" for i in range(1, 4): @@ -1563,7 +1570,7 @@ def add_instance( allow_analyzer=True, hostname=None, env_variables=None, - image="clickhouse/integration-test", + image="altinityinfra/integration-test", tag=None, stay_alive=False, ipv4_address=None, @@ -3251,7 +3258,7 @@ def __init__( copy_common_configs=True, hostname=None, env_variables=None, - image="clickhouse/integration-test", + image="altinityinfra/integration-test", tag="latest", stay_alive=False, ipv4_address=None, diff --git a/tests/integration/helpers/network.py b/tests/integration/helpers/network.py index e6e79dc79478..3689bb409d15 100644 --- a/tests/integration/helpers/network.py +++ b/tests/integration/helpers/network.py @@ -243,7 +243,7 @@ def __init__( def _ensure_container(self): if self._container is None or self._container_expire_time <= time.time(): - image_name = "clickhouse/integration-helper:" + os.getenv( + image_name = "altinityinfra/integration-helper:" + os.getenv( "DOCKER_HELPER_TAG", "latest" ) for i in range(5): diff --git a/tests/integration/runner b/tests/integration/runner index 4c2b10545389..ef00881d3358 100755 --- a/tests/integration/runner +++ b/tests/integration/runner @@ -29,7 +29,7 @@ CONFIG_DIR_IN_REPO = "programs/server" INTEGRATION_DIR_IN_REPO = "tests/integration" SRC_DIR_IN_REPO = "src" -DIND_INTEGRATION_TESTS_IMAGE_NAME = "clickhouse/integration-tests-runner" +DIND_INTEGRATION_TESTS_IMAGE_NAME = "altinityinfra/integration-tests-runner" def check_args_and_update_paths(args): @@ -333,25 +333,25 @@ if __name__ == "__main__": [image, tag] = img_tag.split(":") if image == "clickhouse/dotnet-client": env_tags += "-e {}={} ".format("DOCKER_DOTNET_CLIENT_TAG", tag) - elif image == "clickhouse/integration-helper": + elif image == "altinityinfra/integration-helper": env_tags += "-e {}={} ".format("DOCKER_HELPER_TAG", tag) - elif image == "clickhouse/integration-test": + elif image == "altinityinfra/integration-test": env_tags += "-e {}={} ".format("DOCKER_BASE_TAG", tag) - elif image == "clickhouse/kerberized-hadoop": + elif image == "altinityinfra/kerberized-hadoop": env_tags += "-e {}={} ".format("DOCKER_KERBERIZED_HADOOP_TAG", tag) - elif image == "clickhouse/kerberos-kdc": + elif image == "altinityinfra/kerberos-kdc": env_tags += "-e {}={} ".format("DOCKER_KERBEROS_KDC_TAG", tag) - elif image == "clickhouse/mysql-golang-client": + elif image == "altinityinfra/mysql-golang-client": env_tags += "-e {}={} ".format("DOCKER_MYSQL_GOLANG_CLIENT_TAG", tag) - elif image == "clickhouse/mysql-java-client": + elif image == "altinityinfra/mysql-java-client": env_tags += "-e {}={} ".format("DOCKER_MYSQL_JAVA_CLIENT_TAG", tag) - elif image == "clickhouse/mysql-js-client": + elif image == "altinityinfra/mysql-js-client": env_tags += "-e {}={} ".format("DOCKER_MYSQL_JS_CLIENT_TAG", tag) - elif image == "clickhouse/mysql-php-client": + elif image == 
"altinityinfra/mysql-php-client": env_tags += "-e {}={} ".format("DOCKER_MYSQL_PHP_CLIENT_TAG", tag) - elif image == "clickhouse/nginx-dav": + elif image == "altinityinfra/nginx-dav": env_tags += "-e {}={} ".format("DOCKER_NGINX_DAV_TAG", tag) - elif image == "clickhouse/postgresql-java-client": + elif image == "altinityinfra/postgresql-java-client": env_tags += "-e {}={} ".format("DOCKER_POSTGRESQL_JAVA_CLIENT_TAG", tag) else: logging.info("Unknown image %s" % (image)) diff --git a/tests/integration/test_backward_compatibility/test_functions.py b/tests/integration/test_backward_compatibility/test_functions.py index c86c3ba0ab29..eb82e720f12c 100644 --- a/tests/integration/test_backward_compatibility/test_functions.py +++ b/tests/integration/test_backward_compatibility/test_functions.py @@ -12,6 +12,7 @@ upstream = cluster.add_instance("upstream", allow_analyzer=False) backward = cluster.add_instance( "backward", + # NOTE(vnemkov): don't change that to altinitystable/clickhouse-server image="clickhouse/clickhouse-server", # Note that a bug changed the string representation of several aggregations in 22.9 and 22.10 and some minor # releases of 22.8, 22.7 and 22.3 diff --git a/tests/integration/test_backward_compatibility/test_insert_profile_events.py b/tests/integration/test_backward_compatibility/test_insert_profile_events.py index 8564c6b59526..bc9676541fb3 100644 --- a/tests/integration/test_backward_compatibility/test_insert_profile_events.py +++ b/tests/integration/test_backward_compatibility/test_insert_profile_events.py @@ -10,6 +10,7 @@ upstream_node = cluster.add_instance("upstream_node", allow_analyzer=False) old_node = cluster.add_instance( "old_node", + # NOTE(vnemkov): do not change to altinityinfra/clickhouse-server image="clickhouse/clickhouse-server", tag="22.5.1.2079", with_installed_binary=True, diff --git a/tests/integration/test_backward_compatibility/test_ip_types_binary_compatibility.py b/tests/integration/test_backward_compatibility/test_ip_types_binary_compatibility.py index 04016755a245..9c62b78a328f 100644 --- a/tests/integration/test_backward_compatibility/test_ip_types_binary_compatibility.py +++ b/tests/integration/test_backward_compatibility/test_ip_types_binary_compatibility.py @@ -6,6 +6,7 @@ # Version 21.6.3.14 has incompatible partition id for tables with UUID in partition key. 
node_22_6 = cluster.add_instance( "node_22_6", + # NOTE(vnemkov): do not change to altinityinfra/clickhouse-server image="clickhouse/clickhouse-server", tag="22.6", stay_alive=True, diff --git a/tests/integration/test_backward_compatibility/test_vertical_merges_from_compact_parts.py b/tests/integration/test_backward_compatibility/test_vertical_merges_from_compact_parts.py index 9c9d1a4d3121..6e25c71c92e1 100644 --- a/tests/integration/test_backward_compatibility/test_vertical_merges_from_compact_parts.py +++ b/tests/integration/test_backward_compatibility/test_vertical_merges_from_compact_parts.py @@ -6,6 +6,7 @@ node_old = cluster.add_instance( "node1", + # NOTE(vnemkov): do not change to altinityinfra/clickhouse-server image="clickhouse/clickhouse-server", tag="22.8", stay_alive=True, diff --git a/tests/integration/test_distributed_inter_server_secret/test.py b/tests/integration/test_distributed_inter_server_secret/test.py index d3b5fb9b2369..2900e325b5a4 100644 --- a/tests/integration/test_distributed_inter_server_secret/test.py +++ b/tests/integration/test_distributed_inter_server_secret/test.py @@ -29,6 +29,7 @@ def make_instance(name, cfg, *args, **kwargs): backward = make_instance( "backward", "configs/remote_servers_backward.xml", + # NOTE(vnemkov): do not change to altinityinfra/clickhouse-server image="clickhouse/clickhouse-server", # version without DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET_V2 tag="23.2.3", diff --git a/tests/integration/test_jbod_load_balancing/configs/config.d/storage_configuration.xml b/tests/integration/test_jbod_load_balancing/configs/config.d/storage_configuration.xml index 529eb1bc0b51..5a47aab06f24 100644 --- a/tests/integration/test_jbod_load_balancing/configs/config.d/storage_configuration.xml +++ b/tests/integration/test_jbod_load_balancing/configs/config.d/storage_configuration.xml @@ -31,6 +31,7 @@ jbod3 least_used + 0 diff --git a/tests/integration/test_jbod_load_balancing/test.py b/tests/integration/test_jbod_load_balancing/test.py index 9c62d1bbdfca..204f9740cfde 100644 --- a/tests/integration/test_jbod_load_balancing/test.py +++ b/tests/integration/test_jbod_load_balancing/test.py @@ -134,3 +134,66 @@ def test_jbod_load_balancing_least_used_next_disk(start_cluster): ] finally: node.query("DROP TABLE IF EXISTS data_least_used_next_disk SYNC") + + +def test_jbod_load_balancing_least_used_detect_background_changes(start_cluster): + def get_parts_on_disks(): + parts = node.query( + """ + SELECT count(), disk_name + FROM system.parts + WHERE table = 'data_least_used_detect_background_changes' + GROUP BY disk_name + ORDER BY disk_name + """ + ) + parts = [l.split("\t") for l in parts.strip().split("\n")] + return parts + + try: + node.query( + """ + CREATE TABLE data_least_used_detect_background_changes (p UInt8) + ENGINE = MergeTree + ORDER BY tuple() + SETTINGS storage_policy = 'jbod_least_used'; + + SYSTEM STOP MERGES data_least_used_detect_background_changes; + """ + ) + + node.exec_in_container(["fallocate", "-l200M", "/jbod3/.test"]) + node.query( + """ + INSERT INTO data_least_used_detect_background_changes SELECT * FROM numbers(10); + INSERT INTO data_least_used_detect_background_changes SELECT * FROM numbers(10); + INSERT INTO data_least_used_detect_background_changes SELECT * FROM numbers(10); + INSERT INTO data_least_used_detect_background_changes SELECT * FROM numbers(10); + """ + ) + parts = get_parts_on_disks() + assert parts == [ + ["4", "jbod2"], + ] + + node.exec_in_container(["rm", "/jbod3/.test"]) + node.query( + """ + INSERT INTO 
data_least_used_detect_background_changes SELECT * FROM numbers(10); + INSERT INTO data_least_used_detect_background_changes SELECT * FROM numbers(10); + INSERT INTO data_least_used_detect_background_changes SELECT * FROM numbers(10); + INSERT INTO data_least_used_detect_background_changes SELECT * FROM numbers(10); + """ + ) + parts = get_parts_on_disks() + assert parts == [ + # previous INSERT + ["4", "jbod2"], + # this INSERT + ["4", "jbod3"], + ] + finally: + node.exec_in_container(["rm", "-f", "/jbod3/.test"]) + node.query( + "DROP TABLE IF EXISTS data_least_used_detect_background_changes SYNC" + ) diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index b1191af60b7c..bc9a540dc067 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -30,12 +30,24 @@ from kafka.protocol.group import MemberAssignment from kafka.admin import NewTopic +from pathlib import Path +from helpers.cluster import run_and_check # protoc --version # libprotoc 3.0.0 # # to create kafka_pb2.py # protoc --python_out=. kafka.proto +# Regenerate _pb2 files on each run, to make sure the test doesn't depend on the installed protobuf version +proto_dir = Path(__file__).parent / "clickhouse_path/format_schemas" +gen_dir = Path(__file__).parent +gen_dir.mkdir(exist_ok=True) +run_and_check( + f"python3 -m grpc_tools.protoc -I{proto_dir!s} --python_out={gen_dir!s} --grpc_python_out={gen_dir!s} \ + {proto_dir!s}/kafka.proto", + shell=True, +) + from . import kafka_pb2 from . import social_pb2 from . import message_with_repeated_pb2 diff --git a/tests/queries/0_stateless/02875_final_invalid_read_ranges_bug.reference b/tests/queries/0_stateless/02875_final_invalid_read_ranges_bug.reference new file mode 100644 index 000000000000..573541ac9702 --- /dev/null +++ b/tests/queries/0_stateless/02875_final_invalid_read_ranges_bug.reference @@ -0,0 +1 @@ +0 diff --git a/tests/queries/0_stateless/02875_final_invalid_read_ranges_bug.sql b/tests/queries/0_stateless/02875_final_invalid_read_ranges_bug.sql new file mode 100644 index 000000000000..4e91c2e31676 --- /dev/null +++ b/tests/queries/0_stateless/02875_final_invalid_read_ranges_bug.sql @@ -0,0 +1,20 @@ +CREATE TABLE t +( + tid UInt64, + processed_at DateTime, + created_at DateTime, + amount Int64 +) +ENGINE = ReplacingMergeTree +PARTITION BY toStartOfQuarter(created_at) +PRIMARY KEY (toStartOfDay(created_at), toStartOfDay(processed_at)) +ORDER BY (toStartOfDay(created_at), toStartOfDay(processed_at), tid) +SETTINGS index_granularity = 1; + +INSERT INTO t VALUES (5879429,'2023-07-01 03:50:35','2023-07-01 03:50:35',-278) (5881397,'2023-07-01 06:22:26','2023-07-01 06:22:27',2807) (5925060,'2023-07-04 00:24:03','2023-07-04 00:24:02',-12) (5936591,'2023-07-04 07:37:19','2023-07-04 07:37:18',-12) (5940709,'2023-07-04 09:13:35','2023-07-04 09:13:35',2820) (5942342,'2023-07-04 09:58:00','2023-07-04 09:57:59',-12) (5952231,'2023-07-04 22:33:24','2023-07-04 22:33:24',1692) (5959449,'2023-07-05 04:32:55','2023-07-05 04:32:54',-12) (5963240,'2023-07-05 06:37:08','2023-07-05 06:37:09',1709) (5965742,'2023-07-05 07:27:01','2023-07-05 07:27:02',1709) (5969948,'2023-07-05 08:44:36','2023-07-05 08:44:37',2278) (5971673,'2023-07-05 09:14:09','2023-07-05 09:14:09',5695) (6012987,'2023-07-06 20:52:28','2023-07-06 20:52:27',-536); + +SELECT sum(amount) +FROM t FINAL +WHERE (processed_at >= '2023-09-19 00:00:00') AND (processed_at <= '2023-09-20 01:00:00'); + +DROP TABLE t; diff --git 
a/tests/queries/0_stateless/02875_merge_engine_set_index.reference b/tests/queries/0_stateless/02875_merge_engine_set_index.reference new file mode 100644 index 000000000000..00750edc07d6 --- /dev/null +++ b/tests/queries/0_stateless/02875_merge_engine_set_index.reference @@ -0,0 +1 @@ +3 diff --git a/tests/queries/0_stateless/02875_merge_engine_set_index.sh b/tests/queries/0_stateless/02875_merge_engine_set_index.sh new file mode 100755 index 000000000000..57b5db374c19 --- /dev/null +++ b/tests/queries/0_stateless/02875_merge_engine_set_index.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +# shellcheck disable=SC2154 + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + + +$CLICKHOUSE_CLIENT -nq " + CREATE TABLE t1 + ( + a UInt32, + b UInt32 + ) + ENGINE = MergeTree + ORDER BY (a, b) + SETTINGS index_granularity = 8192; + + INSERT INTO t1 SELECT number, number FROM numbers_mt(1e6); + + CREATE TABLE t2 + ( + a UInt32, + b UInt32 + ) + ENGINE = MergeTree + ORDER BY (a, b) + SETTINGS index_granularity = 8192; + + INSERT INTO t2 VALUES (1, 1) (2, 2) (3, 3); + + CREATE TABLE t + ( + a UInt32, + b UInt32 + ) + ENGINE = Merge(currentDatabase(), 't*');" + +query_id="${CLICKHOUSE_DATABASE}_merge_engine_set_index_$RANDOM$RANDOM" +$CLICKHOUSE_CLIENT --query_id="$query_id" --multiquery -q " +SELECT + a, + b +FROM t +WHERE (a, b) IN ( + SELECT DISTINCT + a, + b + FROM t2 +) +GROUP BY + a, + b +ORDER BY + a ASC, + b DESC +FORMAT Null;" + +$CLICKHOUSE_CLIENT -nq " +SYSTEM FLUSH LOGS; + +SELECT ProfileEvents['SelectedMarks'] +FROM system.query_log +WHERE event_date >= yesterday() AND current_database = currentDatabase() AND (query_id = '$query_id') AND (type = 'QueryFinish');" diff --git a/tests/queries/0_stateless/02890_partition_prune_in_extra_columns.reference b/tests/queries/0_stateless/02890_partition_prune_in_extra_columns.reference new file mode 100644 index 000000000000..d00491fd7e5b --- /dev/null +++ b/tests/queries/0_stateless/02890_partition_prune_in_extra_columns.reference @@ -0,0 +1 @@ +1 diff --git a/tests/queries/0_stateless/02890_partition_prune_in_extra_columns.sql b/tests/queries/0_stateless/02890_partition_prune_in_extra_columns.sql new file mode 100644 index 000000000000..29fd313b12de --- /dev/null +++ b/tests/queries/0_stateless/02890_partition_prune_in_extra_columns.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS e; + +CREATE TABLE e (dt DateTime, t Int32) ENGINE = MergeTree() PARTITION BY (t, toYYYYMM(dt)) ORDER BY tuple(); + +INSERT INTO e SELECT toDateTime('2022-12-12 11:00:00') + number, 86 FROM numbers(10); + +SELECT COUNT(*) FROM e WHERE (t, dt) IN (86, '2022-12-12 11:00:00'); + +DROP TABLE e; diff --git a/tests/queries/0_stateless/02908_alter_column_alias.reference b/tests/queries/0_stateless/02908_alter_column_alias.reference new file mode 100644 index 000000000000..e44df6e9ff60 --- /dev/null +++ b/tests/queries/0_stateless/02908_alter_column_alias.reference @@ -0,0 +1 @@ +CREATE TABLE default.t\n(\n `c0` DateTime,\n `c1` DateTime,\n `a` DateTime ALIAS c1\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02908_alter_column_alias.sql b/tests/queries/0_stateless/02908_alter_column_alias.sql new file mode 100644 index 000000000000..fd98339e8b51 --- /dev/null +++ b/tests/queries/0_stateless/02908_alter_column_alias.sql @@ -0,0 +1,8 @@ +CREATE TABLE t ( + c0 DateTime, + c1 DateTime, + a DateTime alias toStartOfFifteenMinutes(c0) +) ENGINE = 
MergeTree() ORDER BY tuple(); + +ALTER TABLE t MODIFY COLUMN a DateTime ALIAS c1; +SHOW CREATE t; diff --git a/utils/clickhouse-docker b/utils/clickhouse-docker index cfe515f1de54..34b637f0eaad 100755 --- a/utils/clickhouse-docker +++ b/utils/clickhouse-docker @@ -26,11 +26,11 @@ then # https://stackoverflow.com/a/39454426/1555175 wget -nv https://registry.hub.docker.com/v1/repositories/clickhouse/clickhouse-server/tags -O - | sed -e 's/[][]//g' -e 's/"//g' -e 's/ //g' | tr '}' '\n' | awk -F: '{print $3}' else - docker pull clickhouse/clickhouse-server:${param} + docker pull altinityinfra/clickhouse-server:${param} tmp_dir=$(mktemp -d -t ci-XXXXXXXXXX) # older version require /nonexistent folder to exist to run clickhouse client :D chmod 777 ${tmp_dir} set -e - containerid=`docker run -v${tmp_dir}:/nonexistent -d clickhouse/clickhouse-server:${param}` + containerid=`docker run -v${tmp_dir}:/nonexistent -d altinityinfra/clickhouse-server:${param}` set +e while : do diff --git a/utils/zero_copy/zero_copy_schema_converter.py b/utils/zero_copy/zero_copy_schema_converter.py index 6103ac69c6e3..f80f36cecf94 100755 --- a/utils/zero_copy/zero_copy_schema_converter.py +++ b/utils/zero_copy/zero_copy_schema_converter.py @@ -33,7 +33,7 @@ def parse_args(): parser.add_argument( "-z", "--zcroot", - default="clickhouse/zero_copy", + default="altinityinfra/zero_copy", help="ZooKeeper node for new zero-copy data", ) parser.add_argument(