From 65aa954adef8c20c7341602163d4fd10cf53f363 Mon Sep 17 00:00:00 2001 From: MyroTk Date: Tue, 1 Oct 2024 10:27:59 -0700 Subject: [PATCH 01/13] add support for maintenance runs --- .github/workflows/regression.yml | 3 +++ .github/workflows/release_branches.yml | 4 ++-- tests/ci/build_check.py | 4 ++++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index aa4b2f68904e..66d89ddede43 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -93,6 +93,9 @@ env: AWS_DEFAULT_REGION: ${{ secrets.AWS_REPORT_REGION }} DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }} + CHECKS_DATABASE_USER: ${{ secrets.CHECKS_DATABASE_USER }} + CHECKS_DATABASE_PASSWORD: ${{ secrets.CHECKS_DATABASE_PASSWORD }} args: --test-to-end --no-colors --local diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index 8a529b826278..a8113d891bd8 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -640,7 +640,7 @@ jobs: secrets: inherit with: runner_type: altinity-on-demand, altinity-type-cpx51, altinity-image-x86-app-docker-ce, altinity-setup-regression - commit: 66fff15e3afa278543a6b6ffab9be576e054a921 + commit: 91b2de501b77cd90ae9345abb7b56e5fa8c1a921 arch: release build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} @@ -650,7 +650,7 @@ jobs: secrets: inherit with: runner_type: altinity-on-demand, altinity-type-cax41, altinity-image-arm-app-docker-ce, altinity-setup-regression - commit: 66fff15e3afa278543a6b6ffab9be576e054a921 + commit: 91b2de501b77cd90ae9345abb7b56e5fa8c1a921 arch: aarch64 build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }} diff --git a/tests/ci/build_check.py 
b/tests/ci/build_check.py index df72d19693ab..a0f7a5fc24f7 100644 --- a/tests/ci/build_check.py +++ b/tests/ci/build_check.py @@ -13,6 +13,7 @@ from docker_pull_helper import get_image_with_version from env_helper import ( GITHUB_JOB_API_URL, + GITHUB_RUN_ID, IMAGES_PATH, REPO_COPY, S3_ACCESS_KEY_ID, @@ -184,6 +185,9 @@ def get_release_or_pr(pr_info: PRInfo, version: ClickHouseVersion) -> Tuple[str, # It should be fixed in performance-comparison image eventually # For performance tests we always set PRs prefix performance_pr = "PRs/0" + if "commits" not in pr_info.event and "pull_request" not in pr_info.event: + # for dispatch maintenance run we use sha and run id + return f"maintenance/{pr_info.base_ref}/{GITHUB_RUN_ID}", performance_pr if "release" in pr_info.labels or "release-lts" in pr_info.labels: # for release pull requests we use branch names prefixes, not pr numbers return pr_info.head_ref, performance_pr From 8516d66015258518d7ac51cade1946573d811caa Mon Sep 17 00:00:00 2001 From: MyroTk <44327070+MyroTk@users.noreply.github.com> Date: Wed, 2 Oct 2024 08:33:57 -0700 Subject: [PATCH 02/13] Update docker_images_check.py --- tests/ci/docker_images_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ci/docker_images_check.py b/tests/ci/docker_images_check.py index e1e7709ca447..df64c7a6094c 100644 --- a/tests/ci/docker_images_check.py +++ b/tests/ci/docker_images_check.py @@ -111,7 +111,7 @@ def get_changed_docker_images( break # Rebuild all images on push or release - if pr_info.number == 0: + if pr_info.number == 0 or pr_info.number == f"{pr_info.version.major}.{pr_info.version.minor}.{pr_info.version.patch}": changed_images = all_images else: From 07d98304736fbfbb90e74cc8af04fe6816c0b8fb Mon Sep 17 00:00:00 2001 From: MyroTk <44327070+MyroTk@users.noreply.github.com> Date: Wed, 2 Oct 2024 13:21:51 -0700 Subject: [PATCH 03/13] Update Dockerfile --- docker/test/util/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/docker/test/util/Dockerfile b/docker/test/util/Dockerfile index f2041fe445c3..deff1273ac7c 100644 --- a/docker/test/util/Dockerfile +++ b/docker/test/util/Dockerfile @@ -12,7 +12,7 @@ RUN apt-get update \ && apt-get install \ apt-transport-https='2.4.*' \ apt-utils='2.4.*' \ - ca-certificates='20230311ubuntu0.22.04.*' \ + ca-certificates='20240203~22.04*' \ curl='7.81.*' \ dnsutils='1:9.18.*' \ gnupg='2.2.*' \ From 7082e220a6741ccd51871bc0b6f912ff83cd888c Mon Sep 17 00:00:00 2001 From: Alexey Milovidov Date: Sat, 11 Nov 2023 02:29:04 +0100 Subject: [PATCH 04/13] Merge pull request #56502 from amosbird/fix-56481 Fix two cases of projection analysis. --- src/Core/Settings.h | 1 + .../optimizeUseAggregateProjection.cpp | 103 ++++++++++++------ .../optimizeUseNormalProjection.cpp | 40 ++++++- .../Optimizations/projectionsCommon.cpp | 17 ++- .../Optimizations/projectionsCommon.h | 3 +- .../QueryPlan/ReadFromMergeTree.cpp | 21 +++- src/Processors/QueryPlan/ReadFromMergeTree.h | 10 +- src/Storages/MergeTree/MergeTreeData.cpp | 20 +++- src/Storages/MergeTree/MergeTreeData.h | 6 +- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 10 +- .../MergeTree/MergeTreeDataSelectExecutor.h | 8 +- src/Storages/MergeTree/PartitionPruner.cpp | 2 +- src/Storages/MergeTree/PartitionPruner.h | 5 +- src/Storages/StorageMergeTree.cpp | 6 +- ...jection_analysis_reuse_partition.reference | 1 + ...710_projection_analysis_reuse_partition.sh | 16 +++ ...rojection_with_alter_conversions.reference | 1 + ...1710_projection_with_alter_conversions.sql | 15 +++ .../queries/0_stateless/01710_projections.sql | 2 +- 19 files changed, 207 insertions(+), 80 deletions(-) create mode 100644 tests/queries/0_stateless/01710_projection_analysis_reuse_partition.reference create mode 100755 tests/queries/0_stateless/01710_projection_analysis_reuse_partition.sh create mode 100644 tests/queries/0_stateless/01710_projection_with_alter_conversions.reference create mode 100644 
tests/queries/0_stateless/01710_projection_with_alter_conversions.sql diff --git a/src/Core/Settings.h b/src/Core/Settings.h index b3fe90b86446..eab06e06db0f 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -594,6 +594,7 @@ class IColumn; M(Bool, optimize_use_projections, true, "Automatically choose projections to perform SELECT query", 0) ALIAS(allow_experimental_projection_optimization) \ M(Bool, optimize_use_implicit_projections, true, "Automatically choose implicit projections to perform SELECT query", 0) \ M(Bool, force_optimize_projection, false, "If projection optimization is enabled, SELECT queries need to use projection", 0) \ + M(String, preferred_optimize_projection_name, "", "If it is set to a non-empty string, ClickHouse tries to apply specified projection", 0) \ M(Bool, async_socket_for_remote, true, "Asynchronously read from socket executing remote query", 0) \ M(Bool, async_query_sending_for_remote, true, "Asynchronously create connections and send query to shards in remote query", 0) \ M(Bool, insert_null_as_default, true, "Insert DEFAULT values instead of NULL in INSERT SELECT (UNION ALL)", 0) \ diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp index 0599a0fa369d..f4e2a6b7dbd1 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp @@ -411,7 +411,6 @@ struct MinMaxProjectionCandidate { AggregateProjectionCandidate candidate; Block block; - MergeTreeData::DataPartsVector normal_parts; }; struct AggregateProjectionCandidates @@ -476,7 +475,6 @@ AggregateProjectionCandidates getAggregateProjectionCandidates( { // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Projection analyzed DAG {}", proj_dag->dumpDAG()); AggregateProjectionCandidate candidate{.info = std::move(info), .dag = std::move(proj_dag)}; - 
MergeTreeData::DataPartsVector minmax_projection_normal_parts; // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Projection sample block {}", sample_block.dumpStructure()); auto block = reading.getMergeTreeData().getMinMaxCountProjectionBlock( @@ -485,13 +483,13 @@ AggregateProjectionCandidates getAggregateProjectionCandidates( dag.filter_node != nullptr, query_info, parts, - minmax_projection_normal_parts, + nullptr, max_added_blocks.get(), context); // LOG_TRACE(&Poco::Logger::get("optimizeUseProjections"), "Projection sample block 2 {}", block.dumpStructure()); - // minmax_count_projection cannot be used used when there is no data to process, because + // minmax_count_projection cannot be used when there is no data to process, because // it will produce incorrect result during constant aggregation. // See https://github.com/ClickHouse/ClickHouse/issues/36728 if (block) @@ -499,7 +497,6 @@ AggregateProjectionCandidates getAggregateProjectionCandidates( MinMaxProjectionCandidate minmax; minmax.candidate = std::move(candidate); minmax.block = std::move(block); - minmax.normal_parts = std::move(minmax_projection_normal_parts); minmax.candidate.projection = projection; candidates.minmax_projection.emplace(std::move(minmax)); } @@ -508,6 +505,18 @@ AggregateProjectionCandidates getAggregateProjectionCandidates( if (!candidates.minmax_projection) { + auto it = std::find_if(agg_projections.begin(), agg_projections.end(), [&](const auto * projection) + { + return projection->name == context->getSettings().preferred_optimize_projection_name.value; + }); + + if (it != agg_projections.end()) + { + const ProjectionDescription * preferred_projection = *it; + agg_projections.clear(); + agg_projections.push_back(preferred_projection); + } + candidates.real.reserve(agg_projections.size()); for (const auto * projection : agg_projections) { @@ -569,49 +578,75 @@ bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & auto candidates = 
getAggregateProjectionCandidates(node, *aggregating, *reading, max_added_blocks, allow_implicit_projections); - AggregateProjectionCandidate * best_candidate = nullptr; - if (candidates.minmax_projection) - best_candidate = &candidates.minmax_projection->candidate; - else if (candidates.real.empty()) - return false; - const auto & parts = reading->getParts(); + const auto & alter_conversions = reading->getAlterConvertionsForParts(); const auto & query_info = reading->getQueryInfo(); const auto metadata = reading->getStorageMetadata(); ContextPtr context = reading->getContext(); MergeTreeDataSelectExecutor reader(reading->getMergeTreeData()); + AggregateProjectionCandidate * best_candidate = nullptr; - auto ordinary_reading_select_result = reading->selectRangesToRead(parts, /* alter_conversions = */ {}); - size_t ordinary_reading_marks = ordinary_reading_select_result->marks(); - - /// Selecting best candidate. - for (auto & candidate : candidates.real) + if (candidates.minmax_projection) { - auto required_column_names = candidate.dag->getRequiredColumnsNames(); - ActionDAGNodes added_filter_nodes; - if (candidates.has_filter) - added_filter_nodes.nodes.push_back(candidate.dag->getOutputs().front()); + best_candidate = &candidates.minmax_projection->candidate; + } + else if (!candidates.real.empty()) + { + auto ordinary_reading_select_result = reading->selectRangesToRead(parts, alter_conversions); + size_t ordinary_reading_marks = ordinary_reading_select_result->marks(); + + /// Nothing to read. Ignore projections. + if (ordinary_reading_marks == 0) + { + reading->setAnalyzedResult(std::move(ordinary_reading_select_result)); + return false; + } - bool analyzed = analyzeProjectionCandidate( - candidate, *reading, reader, required_column_names, parts, - metadata, query_info, context, max_added_blocks, added_filter_nodes); + const auto & parts_with_ranges = ordinary_reading_select_result->partsWithRanges(); - if (!analyzed) - continue; + /// Selecting best candidate. 
+ for (auto & candidate : candidates.real) + { + auto required_column_names = candidate.dag->getRequiredColumnsNames(); + ActionDAGNodes added_filter_nodes; + if (candidates.has_filter) + added_filter_nodes.nodes.push_back(candidate.dag->getOutputs().front()); + + bool analyzed = analyzeProjectionCandidate( + candidate, + *reading, + reader, + required_column_names, + parts_with_ranges, + metadata, + query_info, + context, + max_added_blocks, + added_filter_nodes); - if (candidate.sum_marks > ordinary_reading_marks) - continue; + if (!analyzed) + continue; - if (best_candidate == nullptr || best_candidate->sum_marks > candidate.sum_marks) - best_candidate = &candidate; - } + if (candidate.sum_marks > ordinary_reading_marks) + continue; + + if (best_candidate == nullptr || best_candidate->sum_marks > candidate.sum_marks) + best_candidate = &candidate; + } - if (!best_candidate) + if (!best_candidate) + { + reading->setAnalyzedResult(std::move(ordinary_reading_select_result)); + return false; + } + } + else { - reading->setAnalyzedResult(std::move(ordinary_reading_select_result)); return false; } + chassert(best_candidate != nullptr); + QueryPlanStepPtr projection_reading; bool has_ordinary_parts; @@ -632,9 +667,7 @@ bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & .storage_id = reading->getMergeTreeData().getStorageID(), .projection_name = candidates.minmax_projection->candidate.projection->name, }); - has_ordinary_parts = !candidates.minmax_projection->normal_parts.empty(); - if (has_ordinary_parts) - reading->resetParts(std::move(candidates.minmax_projection->normal_parts)); + has_ordinary_parts = false; } else { diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp index fbe02265dcc6..d4acf36e0012 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp +++ 
b/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp @@ -8,7 +8,7 @@ #include #include #include -#include +#include namespace DB::QueryPlanOptimizations { @@ -107,6 +107,19 @@ bool optimizeUseNormalProjections(Stack & stack, QueryPlan::Nodes & nodes) if (normal_projections.empty()) return false; + ContextPtr context = reading->getContext(); + auto it = std::find_if(normal_projections.begin(), normal_projections.end(), [&](const auto * projection) + { + return projection->name == context->getSettings().preferred_optimize_projection_name.value; + }); + + if (it != normal_projections.end()) + { + const ProjectionDescription * preferred_projection = *it; + normal_projections.clear(); + normal_projections.push_back(preferred_projection); + } + QueryDAG query; { auto & child = iter->node->children[iter->next_child - 1]; @@ -122,13 +135,22 @@ bool optimizeUseNormalProjections(Stack & stack, QueryPlan::Nodes & nodes) const Names & required_columns = reading->getRealColumnNames(); const auto & parts = reading->getParts(); + const auto & alter_conversions = reading->getAlterConvertionsForParts(); const auto & query_info = reading->getQueryInfo(); - ContextPtr context = reading->getContext(); MergeTreeDataSelectExecutor reader(reading->getMergeTreeData()); - auto ordinary_reading_select_result = reading->selectRangesToRead(parts, /* alter_conversions = */ {}); + auto ordinary_reading_select_result = reading->selectRangesToRead(parts, alter_conversions); size_t ordinary_reading_marks = ordinary_reading_select_result->marks(); + /// Nothing to read. Ignore projections. 
+ if (ordinary_reading_marks == 0) + { + reading->setAnalyzedResult(std::move(ordinary_reading_select_result)); + return false; + } + + const auto & parts_with_ranges = ordinary_reading_select_result->partsWithRanges(); + std::shared_ptr max_added_blocks = getMaxAddedBlocks(reading); for (const auto * projection : normal_projections) @@ -144,8 +166,16 @@ bool optimizeUseNormalProjections(Stack & stack, QueryPlan::Nodes & nodes) added_filter_nodes.nodes.push_back(query.filter_node); bool analyzed = analyzeProjectionCandidate( - candidate, *reading, reader, required_columns, parts, - metadata, query_info, context, max_added_blocks, added_filter_nodes); + candidate, + *reading, + reader, + required_columns, + parts_with_ranges, + metadata, + query_info, + context, + max_added_blocks, + added_filter_nodes); if (!analyzed) continue; diff --git a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp index 7ddda29cad43..c3b3449857b0 100644 --- a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp +++ b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp @@ -210,7 +210,7 @@ bool analyzeProjectionCandidate( const ReadFromMergeTree & reading, const MergeTreeDataSelectExecutor & reader, const Names & required_column_names, - const MergeTreeData::DataPartsVector & parts, + const RangesInDataParts & parts_with_ranges, const StorageMetadataPtr & metadata, const SelectQueryInfo & query_info, const ContextPtr & context, @@ -219,14 +219,20 @@ bool analyzeProjectionCandidate( { MergeTreeData::DataPartsVector projection_parts; MergeTreeData::DataPartsVector normal_parts; - for (const auto & part : parts) + std::vector alter_conversions; + for (const auto & part_with_ranges : parts_with_ranges) { - const auto & created_projections = part->getProjectionParts(); + const auto & created_projections = part_with_ranges.data_part->getProjectionParts(); auto it = 
created_projections.find(candidate.projection->name); if (it != created_projections.end()) + { projection_parts.push_back(it->second); + } else - normal_parts.push_back(part); + { + normal_parts.push_back(part_with_ranges.data_part); + alter_conversions.push_back(part_with_ranges.alter_conversions); + } } if (projection_parts.empty()) @@ -252,7 +258,8 @@ bool analyzeProjectionCandidate( if (!normal_parts.empty()) { - auto normal_result_ptr = reading.selectRangesToRead(std::move(normal_parts), /* alter_conversions = */ {}); + /// TODO: We can reuse existing analysis_result by filtering out projection parts + auto normal_result_ptr = reading.selectRangesToRead(std::move(normal_parts), std::move(alter_conversions)); if (normal_result_ptr->error()) return false; diff --git a/src/Processors/QueryPlan/Optimizations/projectionsCommon.h b/src/Processors/QueryPlan/Optimizations/projectionsCommon.h index 35daccad1154..055ca5d40848 100644 --- a/src/Processors/QueryPlan/Optimizations/projectionsCommon.h +++ b/src/Processors/QueryPlan/Optimizations/projectionsCommon.h @@ -19,6 +19,7 @@ using MergeTreeDataSelectAnalysisResultPtr = std::shared_ptr; using DataPartsVector = std::vector; +struct RangesInDataParts; struct StorageInMemoryMetadata; using StorageMetadataPtr = std::shared_ptr; @@ -71,7 +72,7 @@ bool analyzeProjectionCandidate( const ReadFromMergeTree & reading, const MergeTreeDataSelectExecutor & reader, const Names & required_column_names, - const DataPartsVector & parts, + const RangesInDataParts & parts_with_ranges, const StorageMetadataPtr & metadata, const SelectQueryInfo & query_info, const ContextPtr & context, diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 0b5eb94dbaca..bc43961edf5b 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -2157,10 +2157,23 @@ size_t MergeTreeDataSelectAnalysisResult::marks() const if 
(std::holds_alternative(result)) std::rethrow_exception(std::get(result)); - const auto & index_stats = std::get(result).index_stats; - if (index_stats.empty()) - return 0; - return index_stats.back().num_granules_after; + return std::get(result).selected_marks; +} + +UInt64 MergeTreeDataSelectAnalysisResult::rows() const +{ + if (std::holds_alternative(result)) + std::rethrow_exception(std::get(result)); + + return std::get(result).selected_rows; +} + +const RangesInDataParts & MergeTreeDataSelectAnalysisResult::partsWithRanges() const +{ + if (std::holds_alternative(result)) + std::rethrow_exception(std::get(result)); + + return std::get(result).parts_with_ranges; } } diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index cb2a3a8ddf9e..e3406addf6c3 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -214,13 +214,9 @@ class ReadFromMergeTree final : public SourceStepWithFilter bool hasAnalyzedResult() const { return analyzed_result_ptr != nullptr; } void setAnalyzedResult(MergeTreeDataSelectAnalysisResultPtr analyzed_result_ptr_) { analyzed_result_ptr = std::move(analyzed_result_ptr_); } - void resetParts(MergeTreeData::DataPartsVector parts) - { - prepared_parts = std::move(parts); - alter_conversions_for_parts = {}; - } - const MergeTreeData::DataPartsVector & getParts() const { return prepared_parts; } + const std::vector & getAlterConvertionsForParts() const { return alter_conversions_for_parts; } + const MergeTreeData & getMergeTreeData() const { return data; } size_t getMaxBlockSize() const { return max_block_size; } size_t getNumStreams() const { return requested_num_streams; } @@ -330,6 +326,8 @@ struct MergeTreeDataSelectAnalysisResult bool error() const; size_t marks() const; + UInt64 rows() const; + const RangesInDataParts & partsWithRanges() const; }; } diff --git a/src/Storages/MergeTree/MergeTreeData.cpp 
b/src/Storages/MergeTree/MergeTreeData.cpp index 6bb5231f998d..145875938167 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -6596,7 +6596,7 @@ Block MergeTreeData::getMinMaxCountProjectionBlock( bool has_filter, const SelectQueryInfo & query_info, const DataPartsVector & parts, - DataPartsVector & normal_parts, + DataPartsVector * normal_parts, const PartitionIdToMaxBlock * max_block_numbers_to_read, ContextPtr query_context) const { @@ -6721,10 +6721,22 @@ Block MergeTreeData::getMinMaxCountProjectionBlock( continue; } + /// It's extremely rare that some parts have final marks while others don't. To make it + /// straightforward, disable minmax_count projection when `max(pk)' encounters any part with + /// no final mark. if (need_primary_key_max_column && !part->index_granularity.hasFinalMark()) { - normal_parts.push_back(part); - continue; + if (normal_parts) + { + // 23.8 behaviour + normal_parts->push_back(part); + continue; + } + else + { + // 23.12 behaviour + return {}; + } } real_parts.push_back(part); @@ -7161,7 +7173,7 @@ std::optional MergeTreeData::getQueryProcessingStageWithAgg !query_info.filter_asts.empty() || analysis_result.prewhere_info || analysis_result.before_where, query_info, parts, - normal_parts, + &normal_parts, max_added_blocks.get(), query_context); diff --git a/src/Storages/MergeTree/MergeTreeData.h b/src/Storages/MergeTree/MergeTreeData.h index 22d7a070ad94..1aeacf7c873c 100644 --- a/src/Storages/MergeTree/MergeTreeData.h +++ b/src/Storages/MergeTree/MergeTreeData.h @@ -396,17 +396,13 @@ class MergeTreeData : public IStorage, public WithMutableContext /// query_info - used to filter unneeded parts /// /// parts - part set to filter - /// - /// normal_parts - collects parts that don't have all the needed values to form the block. - /// Specifically, this is when a part doesn't contain a final mark and the related max value is - /// required. 
Block getMinMaxCountProjectionBlock( const StorageMetadataPtr & metadata_snapshot, const Names & required_columns, bool has_filter, const SelectQueryInfo & query_info, const DataPartsVector & parts, - DataPartsVector & normal_parts, + DataPartsVector * normal_parts, const PartitionIdToMaxBlock * max_block_numbers_to_read, ContextPtr query_context) const; diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index afb73ff38592..5b844d80211d 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -797,8 +797,8 @@ std::optional> MergeTreeDataSelectExecutor::filterPar } void MergeTreeDataSelectExecutor::filterPartsByPartition( - std::optional & partition_pruner, - std::optional & minmax_idx_condition, + const std::optional & partition_pruner, + const std::optional & minmax_idx_condition, MergeTreeData::DataPartsVector & parts, std::vector & alter_conversions, const std::optional> & part_values, @@ -1253,6 +1253,8 @@ MergeTreeDataSelectAnalysisResultPtr MergeTreeDataSelectExecutor::estimateNumMar selectColumnNames(column_names_to_return, data, real_column_names, virt_column_names, sample_factor_column_queried); std::optional indexes; + /// NOTE: We don't need alter_conversions because the returned analysis_result is only used for: + /// 1. estimate the number of rows to read; 2. projection reading, which doesn't have alter_conversions. 
return ReadFromMergeTree::selectRangesToRead( std::move(parts), /*alter_conversions=*/ {}, @@ -1785,7 +1787,7 @@ void MergeTreeDataSelectExecutor::selectPartsToRead( const std::optional> & part_values, const std::optional & minmax_idx_condition, const DataTypes & minmax_columns_types, - std::optional & partition_pruner, + const std::optional & partition_pruner, const PartitionIdToMaxBlock * max_block_numbers_to_read, PartFilterCounters & counters) { @@ -1847,7 +1849,7 @@ void MergeTreeDataSelectExecutor::selectPartsToReadWithUUIDFilter( MergeTreeData::PinnedPartUUIDsPtr pinned_part_uuids, const std::optional & minmax_idx_condition, const DataTypes & minmax_columns_types, - std::optional & partition_pruner, + const std::optional & partition_pruner, const PartitionIdToMaxBlock * max_block_numbers_to_read, ContextPtr query_context, PartFilterCounters & counters, diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h index 74d8d8e3c8f7..05d11a858b33 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h @@ -126,7 +126,7 @@ class MergeTreeDataSelectExecutor const std::optional> & part_values, const std::optional & minmax_idx_condition, const DataTypes & minmax_columns_types, - std::optional & partition_pruner, + const std::optional & partition_pruner, const PartitionIdToMaxBlock * max_block_numbers_to_read, PartFilterCounters & counters); @@ -138,7 +138,7 @@ class MergeTreeDataSelectExecutor MergeTreeData::PinnedPartUUIDsPtr pinned_part_uuids, const std::optional & minmax_idx_condition, const DataTypes & minmax_columns_types, - std::optional & partition_pruner, + const std::optional & partition_pruner, const PartitionIdToMaxBlock * max_block_numbers_to_read, ContextPtr query_context, PartFilterCounters & counters, @@ -172,8 +172,8 @@ class MergeTreeDataSelectExecutor /// Filter parts using minmax index and partition key. 
static void filterPartsByPartition( - std::optional & partition_pruner, - std::optional & minmax_idx_condition, + const std::optional & partition_pruner, + const std::optional & minmax_idx_condition, MergeTreeData::DataPartsVector & parts, std::vector & alter_conversions, const std::optional> & part_values, diff --git a/src/Storages/MergeTree/PartitionPruner.cpp b/src/Storages/MergeTree/PartitionPruner.cpp index 97bb9f3b4d43..a5df08e3df96 100644 --- a/src/Storages/MergeTree/PartitionPruner.cpp +++ b/src/Storages/MergeTree/PartitionPruner.cpp @@ -31,7 +31,7 @@ PartitionPruner::PartitionPruner(const StorageMetadataPtr & metadata, ActionsDAG { } -bool PartitionPruner::canBePruned(const IMergeTreeDataPart & part) +bool PartitionPruner::canBePruned(const IMergeTreeDataPart & part) const { if (part.isEmpty()) return true; diff --git a/src/Storages/MergeTree/PartitionPruner.h b/src/Storages/MergeTree/PartitionPruner.h index 7f1b74795c4c..e8a740b15245 100644 --- a/src/Storages/MergeTree/PartitionPruner.h +++ b/src/Storages/MergeTree/PartitionPruner.h @@ -16,14 +16,15 @@ class PartitionPruner PartitionPruner(const StorageMetadataPtr & metadata, const SelectQueryInfo & query_info, ContextPtr context, bool strict); PartitionPruner(const StorageMetadataPtr & metadata, ActionsDAGPtr filter_actions_dag, ContextPtr context, bool strict); - bool canBePruned(const IMergeTreeDataPart & part); + bool canBePruned(const IMergeTreeDataPart & part) const; bool isUseless() const { return useless; } const KeyCondition & getKeyCondition() const { return partition_condition; } private: - std::unordered_map partition_filter_map; + /// Cache already analyzed partitions. + mutable std::unordered_map partition_filter_map; /// partition_key is adjusted here (with substitution from modulo to moduloLegacy). 
KeyDescription partition_key; diff --git a/src/Storages/StorageMergeTree.cpp b/src/Storages/StorageMergeTree.cpp index 850f469b03b1..4f49a6651133 100644 --- a/src/Storages/StorageMergeTree.cpp +++ b/src/Storages/StorageMergeTree.cpp @@ -345,6 +345,8 @@ void StorageMergeTree::alter( prev_mutation = it->first; } + /// Always wait previous mutations synchronously, because alters + /// should be executed in sequential order. if (prev_mutation != 0) { LOG_DEBUG(log, "Cannot change metadata with barrier alter query, will wait for mutation {}", prev_mutation); @@ -372,9 +374,7 @@ void StorageMergeTree::alter( resetObjectColumnsFromActiveParts(parts_lock); } - /// Always execute required mutations synchronously, because alters - /// should be executed in sequential order. - if (!maybe_mutation_commands.empty()) + if (!maybe_mutation_commands.empty() && local_context->getSettingsRef().alter_sync > 0) waitForMutation(mutation_version, false); } diff --git a/tests/queries/0_stateless/01710_projection_analysis_reuse_partition.reference b/tests/queries/0_stateless/01710_projection_analysis_reuse_partition.reference new file mode 100644 index 000000000000..47b07da250f1 --- /dev/null +++ b/tests/queries/0_stateless/01710_projection_analysis_reuse_partition.reference @@ -0,0 +1 @@ +Selected 2/2 parts by partition key, 1 parts by primary key, 1/2 marks by primary key, 1 marks to read from 1 ranges diff --git a/tests/queries/0_stateless/01710_projection_analysis_reuse_partition.sh b/tests/queries/0_stateless/01710_projection_analysis_reuse_partition.sh new file mode 100755 index 000000000000..ba8b3818ba38 --- /dev/null +++ b/tests/queries/0_stateless/01710_projection_analysis_reuse_partition.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +${CLICKHOUSE_CLIENT} -q "drop table if exists t" +${CLICKHOUSE_CLIENT} -q "create table t(s LowCardinality(String), e DateTime64(3), projection p1 (select * order by s, e)) engine MergeTree partition by toYYYYMM(e) order by tuple() settings index_granularity = 8192, index_granularity_bytes = '100M'" +${CLICKHOUSE_CLIENT} -q "insert into t select 'AAP', toDateTime('2023-07-01') + 360 * number from numbers(50000)" +${CLICKHOUSE_CLIENT} -q "insert into t select 'AAPL', toDateTime('2023-07-01') + 360 * number from numbers(50000)" + +CLICKHOUSE_CLIENT_DEBUG_LOG=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=debug/g') + +${CLICKHOUSE_CLIENT_DEBUG_LOG} -q "select count() from t where e >= '2023-11-08 00:00:00.000' and e < '2023-11-09 00:00:00.000' and s in ('AAPL') format Null" 2>&1 | grep -oh "Selected .* parts by partition key, *. parts by primary key, .* marks by primary key, .* marks to read from .* ranges.*$" + +${CLICKHOUSE_CLIENT} -q "drop table t" diff --git a/tests/queries/0_stateless/01710_projection_with_alter_conversions.reference b/tests/queries/0_stateless/01710_projection_with_alter_conversions.reference new file mode 100644 index 000000000000..9874d6464ab7 --- /dev/null +++ b/tests/queries/0_stateless/01710_projection_with_alter_conversions.reference @@ -0,0 +1 @@ +1 2 diff --git a/tests/queries/0_stateless/01710_projection_with_alter_conversions.sql b/tests/queries/0_stateless/01710_projection_with_alter_conversions.sql new file mode 100644 index 000000000000..649a07b9b5f1 --- /dev/null +++ b/tests/queries/0_stateless/01710_projection_with_alter_conversions.sql @@ -0,0 +1,15 @@ +drop table if exists t; + +create table t (i int, j int, projection p (select i order by i)) engine MergeTree order by tuple(); + +insert into t values (1, 2); + +system stop merges t; + +set alter_sync = 0; + +alter table t rename column j to k; + +select * from t; + +drop table 
t; diff --git a/tests/queries/0_stateless/01710_projections.sql b/tests/queries/0_stateless/01710_projections.sql index a96339e30fac..7c45792847e7 100644 --- a/tests/queries/0_stateless/01710_projections.sql +++ b/tests/queries/0_stateless/01710_projections.sql @@ -1,6 +1,6 @@ drop table if exists projection_test; -create table projection_test (`sum(block_count)` UInt64, domain_alias UInt64 alias length(domain), datetime DateTime, domain LowCardinality(String), x_id String, y_id String, block_count Int64, retry_count Int64, duration Int64, kbytes Int64, buffer_time Int64, first_time Int64, total_bytes Nullable(UInt64), valid_bytes Nullable(UInt64), completed_bytes Nullable(UInt64), fixed_bytes Nullable(UInt64), force_bytes Nullable(UInt64), projection p (select toStartOfMinute(datetime) dt_m, countIf(first_time = 0) / count(), avg((kbytes * 8) / duration), count(), sum(block_count) / sum(duration), avg(block_count / duration), sum(buffer_time) / sum(duration), avg(buffer_time / duration), sum(valid_bytes) / sum(total_bytes), sum(completed_bytes) / sum(total_bytes), sum(fixed_bytes) / sum(total_bytes), sum(force_bytes) / sum(total_bytes), sum(valid_bytes) / sum(total_bytes), sum(retry_count) / sum(duration), avg(retry_count / duration), countIf(block_count > 0) / count(), countIf(first_time = 0) / count(), uniqHLL12(x_id), uniqHLL12(y_id) group by dt_m, domain)) engine MergeTree partition by toDate(datetime) order by (toStartOfTenMinutes(datetime), domain) settings index_granularity_bytes = 10000000; +create table projection_test (`sum(block_count)` UInt64, domain_alias UInt64 alias length(domain), datetime DateTime, domain LowCardinality(String), x_id String, y_id String, block_count Int64, retry_count Int64, duration Int64, kbytes Int64, buffer_time Int64, first_time Int64, total_bytes Nullable(UInt64), valid_bytes Nullable(UInt64), completed_bytes Nullable(UInt64), fixed_bytes Nullable(UInt64), force_bytes Nullable(UInt64), projection p (select 
toStartOfMinute(datetime) dt_m, countIf(first_time = 0) / count(), avg((kbytes * 8) / duration), count(), sum(block_count) / sum(duration), avg(block_count / duration), sum(buffer_time) / sum(duration), avg(buffer_time / duration), sum(valid_bytes) / sum(total_bytes), sum(completed_bytes) / sum(total_bytes), sum(fixed_bytes) / sum(total_bytes), sum(force_bytes) / sum(total_bytes), sum(valid_bytes) / sum(total_bytes), sum(retry_count) / sum(duration), avg(retry_count / duration), countIf(block_count > 0) / count(), countIf(first_time = 0) / count(), uniqHLL12(x_id), uniqHLL12(y_id) group by dt_m, domain)) engine MergeTree partition by toDate(datetime) order by toStartOfTenMinutes(datetime) settings index_granularity_bytes = 10000000; insert into projection_test with rowNumberInAllBlocks() as id select 1, toDateTime('2020-10-24 00:00:00') + (id / 20), toString(id % 100), * from generateRandom('x_id String, y_id String, block_count Int64, retry_count Int64, duration Int64, kbytes Int64, buffer_time Int64, first_time Int64, total_bytes Nullable(UInt64), valid_bytes Nullable(UInt64), completed_bytes Nullable(UInt64), fixed_bytes Nullable(UInt64), force_bytes Nullable(UInt64)', 10, 10, 1) limit 1000 settings max_threads = 1; From eb5814ec4bd9b38402d02c0d414e78c06ac81a40 Mon Sep 17 00:00:00 2001 From: Nikolai Kochetov Date: Thu, 11 Jan 2024 14:17:48 +0100 Subject: [PATCH 05/13] Merge pull request #58638 from amosbird/fix-58620 Fix broken partition key analysis when doing projection optimization --- .../optimizeUseAggregateProjection.cpp | 1 - .../Optimizations/optimizeUseNormalProjection.cpp | 1 - .../QueryPlan/Optimizations/projectionsCommon.cpp | 2 -- .../QueryPlan/Optimizations/projectionsCommon.h | 1 - src/Processors/QueryPlan/ReadFromMergeTree.cpp | 7 +------ src/Processors/QueryPlan/ReadFromMergeTree.h | 2 -- src/Storages/MergeTree/MergeTreeData.cpp | 5 ----- .../MergeTree/MergeTreeDataSelectExecutor.cpp | 3 +-- .../MergeTree/MergeTreeDataSelectExecutor.h | 1 - 
.../01710_projection_fix_crash.reference | 0 .../0_stateless/01710_projection_fix_crash.sql | 15 +++++++++++++++ ...65_projection_with_partition_pruning.reference | 1 + .../02965_projection_with_partition_pruning.sql | 9 +++++++++ 13 files changed, 27 insertions(+), 21 deletions(-) create mode 100644 tests/queries/0_stateless/01710_projection_fix_crash.reference create mode 100644 tests/queries/0_stateless/01710_projection_fix_crash.sql create mode 100644 tests/queries/0_stateless/02965_projection_with_partition_pruning.reference create mode 100644 tests/queries/0_stateless/02965_projection_with_partition_pruning.sql diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp index f4e2a6b7dbd1..3040f408cb56 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp @@ -618,7 +618,6 @@ bool optimizeUseAggregateProjections(QueryPlan::Node & node, QueryPlan::Nodes & reader, required_column_names, parts_with_ranges, - metadata, query_info, context, max_added_blocks, diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp index d4acf36e0012..8cb64ddf9e39 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseNormalProjection.cpp @@ -171,7 +171,6 @@ bool optimizeUseNormalProjections(Stack & stack, QueryPlan::Nodes & nodes) reader, required_columns, parts_with_ranges, - metadata, query_info, context, max_added_blocks, diff --git a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp index c3b3449857b0..9bc18ee38ba4 100644 --- a/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp +++ 
b/src/Processors/QueryPlan/Optimizations/projectionsCommon.cpp @@ -211,7 +211,6 @@ bool analyzeProjectionCandidate( const MergeTreeDataSelectExecutor & reader, const Names & required_column_names, const RangesInDataParts & parts_with_ranges, - const StorageMetadataPtr & metadata, const SelectQueryInfo & query_info, const ContextPtr & context, const std::shared_ptr & max_added_blocks, @@ -242,7 +241,6 @@ bool analyzeProjectionCandidate( std::move(projection_parts), nullptr, required_column_names, - metadata, candidate.projection->metadata, query_info, /// How it is actually used? I hope that for index we need only added_filter_nodes added_filter_nodes, diff --git a/src/Processors/QueryPlan/Optimizations/projectionsCommon.h b/src/Processors/QueryPlan/Optimizations/projectionsCommon.h index 055ca5d40848..22606ef8a299 100644 --- a/src/Processors/QueryPlan/Optimizations/projectionsCommon.h +++ b/src/Processors/QueryPlan/Optimizations/projectionsCommon.h @@ -73,7 +73,6 @@ bool analyzeProjectionCandidate( const MergeTreeDataSelectExecutor & reader, const Names & required_column_names, const RangesInDataParts & parts_with_ranges, - const StorageMetadataPtr & metadata, const SelectQueryInfo & query_info, const ContextPtr & context, const std::shared_ptr & max_added_blocks, diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index bc43961edf5b..566c577d687f 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -1168,7 +1168,6 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( std::move(alter_conversions), prewhere_info, filter_nodes, - storage_snapshot->metadata, metadata_for_reading, query_info, context, @@ -1354,7 +1353,6 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( std::vector alter_conversions, const PrewhereInfoPtr & prewhere_info, const ActionDAGNodes & added_filter_nodes, - const 
StorageMetadataPtr & metadata_snapshot_base, const StorageMetadataPtr & metadata_snapshot, const SelectQueryInfo & query_info, ContextPtr context, @@ -1375,7 +1373,6 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( return selectRangesToReadImpl( std::move(parts), std::move(alter_conversions), - metadata_snapshot_base, metadata_snapshot, updated_query_info_with_filter_dag, context, @@ -1391,7 +1388,6 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( return selectRangesToReadImpl( std::move(parts), std::move(alter_conversions), - metadata_snapshot_base, metadata_snapshot, query_info, context, @@ -1407,7 +1403,6 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToRead( MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToReadImpl( MergeTreeData::DataPartsVector parts, std::vector alter_conversions, - const StorageMetadataPtr & metadata_snapshot_base, const StorageMetadataPtr & metadata_snapshot, const SelectQueryInfo & query_info, ContextPtr context, @@ -1468,7 +1463,7 @@ MergeTreeDataSelectAnalysisResultPtr ReadFromMergeTree::selectRangesToReadImpl( parts, alter_conversions, part_values, - metadata_snapshot_base, + metadata_snapshot, data, context, max_block_numbers_to_read.get(), diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.h b/src/Processors/QueryPlan/ReadFromMergeTree.h index e3406addf6c3..b4ae10ff0c7e 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.h +++ b/src/Processors/QueryPlan/ReadFromMergeTree.h @@ -178,7 +178,6 @@ class ReadFromMergeTree final : public SourceStepWithFilter std::vector alter_conversions, const PrewhereInfoPtr & prewhere_info, const ActionDAGNodes & added_filter_nodes, - const StorageMetadataPtr & metadata_snapshot_base, const StorageMetadataPtr & metadata_snapshot, const SelectQueryInfo & query_info, ContextPtr context, @@ -228,7 +227,6 @@ class ReadFromMergeTree final : public SourceStepWithFilter static 
MergeTreeDataSelectAnalysisResultPtr selectRangesToReadImpl( MergeTreeData::DataPartsVector parts, std::vector alter_conversions, - const StorageMetadataPtr & metadata_snapshot_base, const StorageMetadataPtr & metadata_snapshot, const SelectQueryInfo & query_info, ContextPtr context, diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index 145875938167..99f3aed0b933 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -6536,7 +6536,6 @@ static void selectBestProjection( projection_parts, candidate.prewhere_info, candidate.required_columns, - storage_snapshot->metadata, candidate.desc->metadata, query_info, added_filter_nodes, @@ -6561,7 +6560,6 @@ static void selectBestProjection( query_info.prewhere_info, required_columns, storage_snapshot->metadata, - storage_snapshot->metadata, query_info, // TODO syntax_analysis_result set in index added_filter_nodes, query_context, @@ -7213,7 +7211,6 @@ std::optional MergeTreeData::getQueryProcessingStageWithAgg query_info.prewhere_info, analysis_result.required_columns, metadata_snapshot, - metadata_snapshot, query_info, added_filter_nodes, query_context, @@ -7246,7 +7243,6 @@ std::optional MergeTreeData::getQueryProcessingStageWithAgg query_info.prewhere_info, analysis_result.required_columns, metadata_snapshot, - metadata_snapshot, query_info, added_filter_nodes, query_context, @@ -7386,7 +7382,6 @@ bool MergeTreeData::canUseParallelReplicasBasedOnPKAnalysis( query_info.prewhere_info, storage_snapshot->getMetadataForQuery()->getColumns().getAll().getNames(), storage_snapshot->metadata, - storage_snapshot->metadata, query_info, /*added_filter_nodes*/ActionDAGNodes{}, query_context, diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index 5b844d80211d..62dc39ebc6d0 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ 
b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -816,6 +816,7 @@ void MergeTreeDataSelectExecutor::filterPartsByPartition( if (metadata_snapshot->hasPartitionKey()) { + chassert(minmax_idx_condition && partition_pruner); const auto & partition_key = metadata_snapshot->getPartitionKey(); minmax_columns_types = data.getMinMaxColumnsTypes(partition_key); @@ -1231,7 +1232,6 @@ MergeTreeDataSelectAnalysisResultPtr MergeTreeDataSelectExecutor::estimateNumMar MergeTreeData::DataPartsVector parts, const PrewhereInfoPtr & prewhere_info, const Names & column_names_to_return, - const StorageMetadataPtr & metadata_snapshot_base, const StorageMetadataPtr & metadata_snapshot, const SelectQueryInfo & query_info, const ActionDAGNodes & added_filter_nodes, @@ -1260,7 +1260,6 @@ MergeTreeDataSelectAnalysisResultPtr MergeTreeDataSelectExecutor::estimateNumMar /*alter_conversions=*/ {}, prewhere_info, added_filter_nodes, - metadata_snapshot_base, metadata_snapshot, query_info, context, diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h index 05d11a858b33..53f4ceba06ac 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.h @@ -59,7 +59,6 @@ class MergeTreeDataSelectExecutor MergeTreeData::DataPartsVector parts, const PrewhereInfoPtr & prewhere_info, const Names & column_names, - const StorageMetadataPtr & metadata_snapshot_base, const StorageMetadataPtr & metadata_snapshot, const SelectQueryInfo & query_info, const ActionDAGNodes & added_filter_nodes, diff --git a/tests/queries/0_stateless/01710_projection_fix_crash.reference b/tests/queries/0_stateless/01710_projection_fix_crash.reference new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/queries/0_stateless/01710_projection_fix_crash.sql b/tests/queries/0_stateless/01710_projection_fix_crash.sql new file mode 100644 index 000000000000..703a773ebcab --- /dev/null 
+++ b/tests/queries/0_stateless/01710_projection_fix_crash.sql @@ -0,0 +1,15 @@ +set force_index_by_date = 1; + +create table xxxxx (col1 String, col2 String, _time DateTime, projection p (select * order by col2)) engine=MergeTree partition by col1 order by tuple(); + +create table yyyyyyy (col1 String, col2 String, _time DateTime, projection p (select * order by col2)) engine=MergeTree partition by col1 order by tuple(); + +insert into xxxxx (col1, col2, _time) values ('xxx', 'zzzz', now()+1); +insert into yyyyyyy (col1, col2, _time) values ('xxx', 'zzzz', now()); + +SELECT count() +FROM xxxxx +WHERE (col1 = 'xxx') AND (_time = ( + SELECT max(_time) + FROM yyyyyyy + WHERE (col1 = 'xxx') AND (col2 = 'zzzz') AND (_time > (now() - toIntervalDay(3))))) diff --git a/tests/queries/0_stateless/02965_projection_with_partition_pruning.reference b/tests/queries/0_stateless/02965_projection_with_partition_pruning.reference new file mode 100644 index 000000000000..5816b4eb49bb --- /dev/null +++ b/tests/queries/0_stateless/02965_projection_with_partition_pruning.reference @@ -0,0 +1 @@ +3 4 diff --git a/tests/queries/0_stateless/02965_projection_with_partition_pruning.sql b/tests/queries/0_stateless/02965_projection_with_partition_pruning.sql new file mode 100644 index 000000000000..92f7cc0671c9 --- /dev/null +++ b/tests/queries/0_stateless/02965_projection_with_partition_pruning.sql @@ -0,0 +1,9 @@ +drop table if exists a; + +create table a (i int, j int, projection p (select * order by j)) engine MergeTree partition by i order by tuple() settings index_granularity = 1; + +insert into a values (1, 2), (0, 5), (3, 4); + +select * from a where i > 0 and j = 4 settings force_index_by_date = 1; + +drop table a; From 0ed34087bdc831d908bf34fa9ebae33dfa9813d5 Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Thu, 17 Oct 2024 18:53:06 +0200 Subject: [PATCH 06/13] Bumped version to 23.8.16.42.altinitystable --- cmake/autogenerated_versions.txt | 6 +++--- 1 file changed, 3 
insertions(+), 3 deletions(-) diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index 879920553ed2..3da9f1d8a216 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -8,9 +8,9 @@ SET(VERSION_MINOR 8) SET(VERSION_PATCH 16) SET(VERSION_GITHASH 060ff8e813a4a16a540063127f8c91e2108d9adf) -SET(VERSION_TWEAK 41) +SET(VERSION_TWEAK 42) SET(VERSION_FLAVOUR altinitystable) -SET(VERSION_DESCRIBE v23.8.16.41.altinitystable) -SET(VERSION_STRING 23.8.16.41.altinitystable) +SET(VERSION_DESCRIBE v23.8.16.42.altinitystable) +SET(VERSION_STRING 23.8.16.42.altinitystable) # end of autochange From 2733026b27d6fed4cd610e273fd50238f8b03807 Mon Sep 17 00:00:00 2001 From: MyroTk Date: Thu, 17 Oct 2024 15:23:22 -0700 Subject: [PATCH 07/13] Swap to snapshot runners and set pr number to 1 for scheduled runs --- .github/actions/common_setup/action.yml | 10 +++ .github/workflows/release_branches.yml | 88 ++++++++++++------------- tests/ci/docker_images_check.py | 6 +- tests/ci/pr_info.py | 4 +- 4 files changed, 58 insertions(+), 50 deletions(-) diff --git a/.github/actions/common_setup/action.yml b/.github/actions/common_setup/action.yml index b02413adc44c..ab446c3ae7ae 100644 --- a/.github/actions/common_setup/action.yml +++ b/.github/actions/common_setup/action.yml @@ -26,6 +26,16 @@ runs: echo "The GITHUB_JOB_OVERRIDDEN ENV is unset, and must be set for the nested jobs" exit 1 fi + - name: Setup zram + shell: bash + run: | + sudo modprobe zram + MemTotal=$(grep -Po "(?<=MemTotal:)\s+\d+" /proc/meminfo) # KiB + Percent=200 + ZRAM_SIZE=$(($MemTotal / 1024 / 1024 * $Percent / 100)) # Convert to GiB + .github/retry.sh 30 2 sudo zramctl --size ${ZRAM_SIZE}GiB --algorithm zstd /dev/zram0 + sudo mkswap /dev/zram0 && sudo swapon -p 100 /dev/zram0 + sudo sysctl vm.swappiness=200 - name: Setup $TEMP_PATH shell: bash run: | diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index 
a8113d891bd8..3d6478785505 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -30,7 +30,7 @@ on: # yamllint disable-line rule:truthy jobs: DockerHubPushAarch64: - runs-on: [self-hosted, altinity-on-demand, altinity-type-cax41, altinity-in-hel1, altinity-image-arm-system-ubuntu-22.04] + runs-on: [self-hosted, altinity-on-demand, altinity-type-cax41, altinity-in-hel1, altinity-image-arm-snapshot-22.04-arm, altinity-startup-snapshot, altinity-setup-none] steps: - name: Check out repository code uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 @@ -52,7 +52,7 @@ jobs: path: ${{ runner.temp }}/docker_images_check/changed_images_aarch64.json DockerHubPushAmd64: - runs-on: [self-hosted, altinity-on-demand, altinity-type-cpx51, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04] + runs-on: [self-hosted, altinity-on-demand, altinity-type-cpx51, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none] steps: - name: Check out repository code uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 @@ -75,7 +75,7 @@ jobs: DockerHubPush: needs: [DockerHubPushAmd64, DockerHubPushAarch64] - runs-on: [self-hosted, altinity-on-demand, altinity-type-cpx51, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04] + runs-on: [self-hosted, altinity-on-demand, altinity-type-cpx51, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none] steps: - name: Check out repository code uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6 @@ -119,7 +119,7 @@ jobs: secrets: inherit with: test_name: Compatibility check X86 - runner_type: altinity-on-demand, altinity-type-cpx51, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04 + runner_type: altinity-on-demand, altinity-type-cpx51, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none timeout_minutes: 180 
run_command: | cd "$REPO_COPY/tests/ci" @@ -131,7 +131,7 @@ jobs: secrets: inherit with: test_name: Compatibility check Aarch64 - runner_type: altinity-on-demand, altinity-type-cax41, altinity-image-arm-system-ubuntu-22.04 + runner_type: altinity-on-demand, altinity-type-cax41, altinity-image-arm-snapshot-22.04-arm, altinity-startup-snapshot, altinity-setup-none timeout_minutes: 180 run_command: | cd "$REPO_COPY/tests/ci" @@ -148,7 +148,7 @@ jobs: build_name: package_release checkout_depth: 0 timeout_minutes: 180 - runner_type: altinity-setup-builder, altinity-type-ccx53, altinity-on-demand, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04 + runner_type: altinity-setup-builder, altinity-type-ccx53, altinity-on-demand, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none additional_envs: | CLICKHOUSE_STABLE_VERSION_SUFFIX=altinitystable @@ -160,7 +160,7 @@ jobs: build_name: package_aarch64 checkout_depth: 0 timeout_minutes: 180 - runner_type: altinity-setup-builder, altinity-type-ccx53, altinity-on-demand, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04 + runner_type: altinity-setup-builder, altinity-type-ccx53, altinity-on-demand, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none additional_envs: | CLICKHOUSE_STABLE_VERSION_SUFFIX=altinitystable @@ -172,7 +172,7 @@ jobs: build_name: package_asan checkout_depth: 0 timeout_minutes: 180 - runner_type: altinity-on-demand, altinity-setup-builder, altinity-type-ccx53, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04 + runner_type: altinity-on-demand, altinity-setup-builder, altinity-type-ccx53, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none additional_envs: | CLICKHOUSE_STABLE_VERSION_SUFFIX=altinitystable @@ -184,7 +184,7 @@ jobs: build_name: package_ubsan checkout_depth: 0 timeout_minutes: 180 - runner_type: altinity-on-demand, 
altinity-setup-builder, altinity-type-ccx53, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04 + runner_type: altinity-on-demand, altinity-setup-builder, altinity-type-ccx53, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none additional_envs: | CLICKHOUSE_STABLE_VERSION_SUFFIX=altinitystable @@ -196,7 +196,7 @@ jobs: build_name: package_tsan checkout_depth: 0 timeout_minutes: 180 - runner_type: altinity-on-demand, altinity-setup-builder, altinity-type-ccx53, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04 + runner_type: altinity-on-demand, altinity-setup-builder, altinity-type-ccx53, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none additional_envs: | CLICKHOUSE_STABLE_VERSION_SUFFIX=altinitystable @@ -208,7 +208,7 @@ jobs: build_name: package_msan checkout_depth: 0 timeout_minutes: 180 - runner_type: altinity-on-demand, altinity-setup-builder, altinity-type-ccx53, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04 + runner_type: altinity-on-demand, altinity-setup-builder, altinity-type-ccx53, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none additional_envs: | CLICKHOUSE_STABLE_VERSION_SUFFIX=altinitystable @@ -220,7 +220,7 @@ jobs: build_name: package_debug checkout_depth: 0 timeout_minutes: 180 - runner_type: altinity-on-demand, altinity-setup-builder, altinity-type-ccx53, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04 + runner_type: altinity-on-demand, altinity-setup-builder, altinity-type-ccx53, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none additional_envs: | CLICKHOUSE_STABLE_VERSION_SUFFIX=altinitystable @@ -231,7 +231,7 @@ jobs: needs: - BuilderDebRelease - BuilderDebAarch64 - runs-on: [self-hosted, altinity-on-demand, altinity-type-cpx41, altinity-in-ash, altinity-image-x86-system-ubuntu-22.04] + runs-on: 
[self-hosted, altinity-on-demand, altinity-type-cpx41, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none] timeout-minutes: 180 steps: - name: Check out repository code @@ -267,7 +267,7 @@ jobs: secrets: inherit with: test_name: ClickHouse build check - runner_type: altinity-on-demand, altinity-setup-reporter, altinity-type-cax11, altinity-in-hel1, altinity-image-arm-system-ubuntu-22.04 + runner_type: altinity-on-demand, altinity-type-cax11, altinity-in-hel1, altinity-image-arm-snapshot-22.04-arm, altinity-startup-snapshot, altinity-setup-none timeout_minutes: 180 additional_envs: | NEEDS_DATA< Date: Thu, 17 Oct 2024 23:05:51 +0000 Subject: [PATCH 08/13] Updated package maintainer metadata --- packages/clickhouse-client.yaml | 2 +- packages/clickhouse-common-static-dbg.yaml | 2 +- packages/clickhouse-common-static.yaml | 2 +- packages/clickhouse-keeper-dbg.yaml | 2 +- packages/clickhouse-keeper.yaml | 2 +- packages/clickhouse-server.yaml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/clickhouse-client.yaml b/packages/clickhouse-client.yaml index 059562835d8c..cc87aaf338f8 100644 --- a/packages/clickhouse-client.yaml +++ b/packages/clickhouse-client.yaml @@ -16,7 +16,7 @@ homepage: "https://altinity.com/" license: "Apache" section: "database" priority: "optional" -maintainer: "ClickHouse Dev Team " +maintainer: "Altinity Dev Team https://github.com/Altinity/ClickHouse/" deb: fields: Source: clickhouse diff --git a/packages/clickhouse-common-static-dbg.yaml b/packages/clickhouse-common-static-dbg.yaml index 63b95b034944..0821b0291805 100644 --- a/packages/clickhouse-common-static-dbg.yaml +++ b/packages/clickhouse-common-static-dbg.yaml @@ -16,7 +16,7 @@ homepage: "https://altinity.com/" license: "Apache" section: "database" priority: "optional" -maintainer: "ClickHouse Dev Team " +maintainer: "Altinity Dev Team https://github.com/Altinity/ClickHouse" deb: fields: Source: clickhouse 
diff --git a/packages/clickhouse-common-static.yaml b/packages/clickhouse-common-static.yaml index 96dd2d890a19..0140db3994b3 100644 --- a/packages/clickhouse-common-static.yaml +++ b/packages/clickhouse-common-static.yaml @@ -16,7 +16,7 @@ homepage: "https://altinity.com/" license: "Apache" section: "database" priority: "optional" -maintainer: "ClickHouse Dev Team " +maintainer: "Altinity Dev Team https://github.com/Altinity/ClickHouse" deb: fields: Source: clickhouse diff --git a/packages/clickhouse-keeper-dbg.yaml b/packages/clickhouse-keeper-dbg.yaml index c1c8a178ba74..07aaafae83ef 100644 --- a/packages/clickhouse-keeper-dbg.yaml +++ b/packages/clickhouse-keeper-dbg.yaml @@ -16,7 +16,7 @@ homepage: "https://altinity.com/" license: "Apache" section: "database" priority: "optional" -maintainer: "ClickHouse Dev Team " +maintainer: "Altinity Dev Team https://github.com/Altinity/ClickHouse" deb: fields: Source: clickhouse diff --git a/packages/clickhouse-keeper.yaml b/packages/clickhouse-keeper.yaml index f9780cd4ad9c..4fea5798ff9a 100644 --- a/packages/clickhouse-keeper.yaml +++ b/packages/clickhouse-keeper.yaml @@ -16,7 +16,7 @@ homepage: "https://altinity.com/" license: "Apache" section: "database" priority: "optional" -maintainer: "ClickHouse Dev Team " +maintainer: "Altinity Dev Team https://github.com/Altinity/ClickHouse" deb: fields: Source: clickhouse diff --git a/packages/clickhouse-server.yaml b/packages/clickhouse-server.yaml index 9a004c3eb1c6..3f396fe35137 100644 --- a/packages/clickhouse-server.yaml +++ b/packages/clickhouse-server.yaml @@ -16,7 +16,7 @@ homepage: "https://altinity.com/" license: "Apache" section: "database" priority: "optional" -maintainer: "ClickHouse Dev Team " +maintainer: "Altinity Dev Team https://github.com/Altinity/ClickHouse" deb: fields: Source: clickhouse From 875ab567a2ec89ca69806c135e41d8e084e381d4 Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Fri, 18 Oct 2024 01:43:12 +0200 Subject: [PATCH 09/13] Update 
01710_projection_fix_crash.reference Fixed .reference with accordance with the reference (24.3) version output --- tests/queries/0_stateless/01710_projection_fix_crash.reference | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/queries/0_stateless/01710_projection_fix_crash.reference b/tests/queries/0_stateless/01710_projection_fix_crash.reference index e69de29bb2d1..18748286e5b8 100644 --- a/tests/queries/0_stateless/01710_projection_fix_crash.reference +++ b/tests/queries/0_stateless/01710_projection_fix_crash.reference @@ -0,0 +1 @@ +0 From 6cf2cb7a5dc2027746f5c4d9893901d477449246 Mon Sep 17 00:00:00 2001 From: Vasily Nemkov Date: Fri, 18 Oct 2024 00:02:03 +0000 Subject: [PATCH 10/13] Fixed some dependencies versions So those can be installed not only when you are in 2023. --- docker/test/integration/runner/Dockerfile | 4 ++-- docker/test/util/Dockerfile | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/test/integration/runner/Dockerfile b/docker/test/integration/runner/Dockerfile index 6fde64601eef..e0e60939e3b2 100644 --- a/docker/test/integration/runner/Dockerfile +++ b/docker/test/integration/runner/Dockerfile @@ -9,7 +9,7 @@ RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list RUN apt-get update \ && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \ adduser='3.11*' \ - ca-certificates='2023*' \ + ca-certificates \ bash='5.1-*' \ btrfs-progs='5.16.*' \ e2fsprogs='1.46.*' \ @@ -96,7 +96,7 @@ RUN python3 -m pip install --no-cache-dir \ pytest-repeat~=0.9.3 \ pytest-timeout~=2.2.0 \ pytest-xdist~=3.5.0 \ - pytz~=2023.3.post1 \ + pytz \ pyyaml~=5.3.1 \ redis~=5.0.1 \ requests-kerberos \ diff --git a/docker/test/util/Dockerfile b/docker/test/util/Dockerfile index f2041fe445c3..02e5171a34da 100644 --- a/docker/test/util/Dockerfile +++ b/docker/test/util/Dockerfile @@ -12,7 +12,7 @@ RUN apt-get update \ && apt-get install \ apt-transport-https='2.4.*' \ apt-utils='2.4.*' \ - 
ca-certificates='20230311ubuntu0.22.04.*' \ + ca-certificates \ curl='7.81.*' \ dnsutils='1:9.18.*' \ gnupg='2.2.*' \ From d77df9e2122c1839ec759fa08f3cfd61478d5a5f Mon Sep 17 00:00:00 2001 From: MyroTk <44327070+MyroTk@users.noreply.github.com> Date: Fri, 18 Oct 2024 09:32:34 -0700 Subject: [PATCH 11/13] Update sqllogic Dockerfile to 24.3 version --- docker/test/sqllogic/Dockerfile | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/docker/test/sqllogic/Dockerfile b/docker/test/sqllogic/Dockerfile index 508fd25d6f42..34af8825eef6 100644 --- a/docker/test/sqllogic/Dockerfile +++ b/docker/test/sqllogic/Dockerfile @@ -15,28 +15,31 @@ RUN apt-get update --yes \ unixodbc-dev \ odbcinst \ sudo \ - && apt-get clean + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* RUN pip3 install \ numpy \ pyodbc \ - deepdiff + deepdiff \ + sqlglot -ARG odbc_repo="https://github.com/ClickHouse/clickhouse-odbc.git" +ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz" + +RUN mkdir -p /tmp/clickhouse-odbc-tmp \ + && cd /tmp/clickhouse-odbc-tmp \ + && curl -L ${odbc_driver_url} | tar --strip-components=1 -xz clickhouse-odbc-1.1.6-Linux \ + && mkdir /usr/local/lib64 -p \ + && cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib64/ \ + && odbcinst -i -d -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbcinst.ini.sample \ + && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \ + && sed -i 's"=libclickhouseodbc"=/usr/local/lib64/libclickhouseodbc"' /etc/odbcinst.ini \ + && rm -rf /tmp/clickhouse-odbc-tmp -RUN git clone --recursive ${odbc_repo} \ - && mkdir -p /clickhouse-odbc/build \ - && cmake -S /clickhouse-odbc -B /clickhouse-odbc/build \ - && ls /clickhouse-odbc/build/driver \ - && make -j 10 -C /clickhouse-odbc/build \ - && ls /clickhouse-odbc/build/driver \ - && mkdir 
-p /usr/local/lib64/ && cp /clickhouse-odbc/build/driver/lib*.so /usr/local/lib64/ \ && odbcinst -i -d -f /clickhouse-odbc/packaging/odbcinst.ini.sample \ && odbcinst -i -s -l -f /clickhouse-odbc/packaging/odbc.ini.sample ENV TZ=Europe/Amsterdam -ENV MAX_RUN_TIME=900 -RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone +ENV MAX_RUN_TIME=9000 +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone ARG sqllogic_test_repo="https://github.com/gregrahn/sqllogictest.git" From 0122a6c90278df9f5fbd3b4b0ec7b51494f3a19b Mon Sep 17 00:00:00 2001 From: MyroTk <44327070+MyroTk@users.noreply.github.com> Date: Fri, 18 Oct 2024 10:21:13 -0700 Subject: [PATCH 12/13] Relocate builder runners to avoid hetzner outage --- .github/workflows/release_branches.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml index 3d6478785505..bcc42b85f015 100644 --- a/.github/workflows/release_branches.yml +++ b/.github/workflows/release_branches.yml @@ -148,7 +148,7 @@ jobs: build_name: package_release checkout_depth: 0 timeout_minutes: 180 - runner_type: altinity-setup-builder, altinity-type-ccx53, altinity-on-demand, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none + runner_type: altinity-on-demand, altinity-type-ccx53, altinity-in-hil, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none additional_envs: | CLICKHOUSE_STABLE_VERSION_SUFFIX=altinitystable @@ -160,7 +160,7 @@ jobs: build_name: package_aarch64 checkout_depth: 0 timeout_minutes: 180 - runner_type: altinity-setup-builder, altinity-type-ccx53, altinity-on-demand, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none + runner_type: altinity-on-demand, altinity-type-ccx53, altinity-in-hil, altinity-image-x86-snapshot-22.04-amd, 
altinity-startup-snapshot, altinity-setup-none additional_envs: | CLICKHOUSE_STABLE_VERSION_SUFFIX=altinitystable @@ -172,7 +172,7 @@ jobs: build_name: package_asan checkout_depth: 0 timeout_minutes: 180 - runner_type: altinity-on-demand, altinity-setup-builder, altinity-type-ccx53, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none + runner_type: altinity-on-demand, altinity-type-ccx53, altinity-in-hil, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none additional_envs: | CLICKHOUSE_STABLE_VERSION_SUFFIX=altinitystable @@ -184,7 +184,7 @@ jobs: build_name: package_ubsan checkout_depth: 0 timeout_minutes: 180 - runner_type: altinity-on-demand, altinity-setup-builder, altinity-type-ccx53, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none + runner_type: altinity-on-demand, altinity-type-ccx53, altinity-in-hil, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none additional_envs: | CLICKHOUSE_STABLE_VERSION_SUFFIX=altinitystable @@ -196,7 +196,7 @@ jobs: build_name: package_tsan checkout_depth: 0 timeout_minutes: 180 - runner_type: altinity-on-demand, altinity-setup-builder, altinity-type-ccx53, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none + runner_type: altinity-on-demand, altinity-type-ccx53, altinity-in-hil, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none additional_envs: | CLICKHOUSE_STABLE_VERSION_SUFFIX=altinitystable @@ -208,7 +208,7 @@ jobs: build_name: package_msan checkout_depth: 0 timeout_minutes: 180 - runner_type: altinity-on-demand, altinity-setup-builder, altinity-type-ccx53, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none + runner_type: altinity-on-demand, altinity-type-ccx53, altinity-in-hil, altinity-image-x86-snapshot-22.04-amd, 
altinity-startup-snapshot, altinity-setup-none additional_envs: | CLICKHOUSE_STABLE_VERSION_SUFFIX=altinitystable @@ -220,7 +220,7 @@ jobs: build_name: package_debug checkout_depth: 0 timeout_minutes: 180 - runner_type: altinity-on-demand, altinity-setup-builder, altinity-type-ccx53, altinity-in-ash, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none + runner_type: altinity-on-demand, altinity-type-ccx53, altinity-in-hil, altinity-image-x86-snapshot-22.04-amd, altinity-startup-snapshot, altinity-setup-none additional_envs: | CLICKHOUSE_STABLE_VERSION_SUFFIX=altinitystable From 95383fa740614c94aec608d2acf6188e90bfb7f9 Mon Sep 17 00:00:00 2001 From: MyroTk <44327070+MyroTk@users.noreply.github.com> Date: Fri, 18 Oct 2024 17:02:06 -0700 Subject: [PATCH 13/13] Tiered Storage regression test fix --- .github/workflows/regression.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml index 66d89ddede43..66682ea320e1 100644 --- a/.github/workflows/regression.yml +++ b/.github/workflows/regression.yml @@ -525,6 +525,7 @@ jobs: uses: actions/checkout@v4 with: repository: Altinity/clickhouse-regression + ref: ${{ inputs.commit }} - name: Set envs run: | cat >> "$GITHUB_ENV" << 'EOF'